add case 2, update evaluation
RobertJaro committed Sep 30, 2022
1 parent e35fa31 commit ef1bb07
Showing 4 changed files with 103 additions and 80 deletions.
21 changes: 10 additions & 11 deletions nf2/evaluation/analytical_solution.py
@@ -1,6 +1,5 @@
import numpy as np
from scipy import interpolate
from scipy.integrate import solve_ivp, solve_bvp
from scipy.integrate import solve_bvp


def _differential_equation(mu, u, n, a2):
@@ -19,7 +18,7 @@ def _differential_equation(mu, u, n, a2):
return (dP_dmu, d2P_dmu2)


def get_analytic_b_field(n=1, m=1, a2=0.425, l=0.3, psi=np.pi / 4, resolution=64):
def get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi / 4, resolution=64, bounds=[-1, 1, -1, 1, 0, 2]):
"""
Calculate the analytic NLFF field from Low & Lou (1989).
@@ -32,12 +31,13 @@ def get_analytic_b_field(n=1, m=1, a2=0.425, l=0.3, psi=np.pi / 4, resolution=64
:param bounds: dimensions of the volume (x_start, x_end, y_start, y_end, z_start, z_end)
:return: magnetic field B (x, y, z, v)
"""
sol_P = solve_P(n, m, a2)
sol_P, a2 = solve_P(n, m)

resolution = [resolution] * 3 if not isinstance(resolution, list) else resolution
coords = np.stack(np.meshgrid(np.linspace(-1, 1, resolution[1], dtype=np.float32),
np.linspace(-1, 1, resolution[0], dtype=np.float32),
np.linspace(0, 2, resolution[2], dtype=np.float32)), -1).transpose([1, 0, 2, 3])
coords = np.stack(np.meshgrid(np.linspace(bounds[0], bounds[1], resolution[1], dtype=np.float32),
np.linspace(bounds[2], bounds[3], resolution[0], dtype=np.float32),
np.linspace(bounds[4], bounds[5], resolution[2], dtype=np.float32)), -1).transpose(
[1, 0, 2, 3])

x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
X = x * np.cos(psi) - (z + l) * np.sin(psi)
@@ -74,16 +74,16 @@ def get_analytic_b_field(n=1, m=1, a2=0.425, l=0.3, psi=np.pi / 4, resolution=64
return b_field


def solve_P(n, m, a2, v0=10, P0=0):
def solve_P(n, m):
"""
Solve the differential equation from Low & Lou (1989).
:param n: variable (only n=1)
:param a2: eigenvalue
:param v0: start condition for dP/dmu
:param P0: boundary condition for P(-1) and P(1)
:return: interpolated functions for P and dP/dmu
"""

def f(x, y, p):
a2 = p[0]
d2P_dmu2 = -(n * (n + 1) * y[0] + a2 * (1 + n) / n * y[0] ** (1 + 2 / n)) / (1 - x ** 2 + 1e-6)
@@ -92,7 +92,6 @@ def f_boundary(Pa, Pb, p):
def f_boundary(Pa, Pb, p):
return np.array([Pa[0] - 0, Pb[0] - 0, Pa[1] - 10])


mu = np.linspace(-1, 1, num=256)

if m % 2 == 0:
@@ -120,4 +119,4 @@ def shooting(a2_init):
# get final solution
eval = shooting([eigenvalues[-1]])[0]

return eval.sol
return eval.sol, eval.p[0]
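
Note: a minimal usage sketch of the updated interface, for reference only. The module path follows the imports elsewhere in this commit; the return shape and the callable solution object are inferred from the code and docstrings, not verified against the full repository.

import numpy as np
from nf2.evaluation.analytical_solution import get_analytic_b_field, solve_P

# CASE 2: asymmetric Low & Lou field on an 80 x 80 x 72 grid
B = get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])
# expected shape: (80, 80, 72, 3), i.e. (x, y, z, component)

# solve_P now also returns the eigenvalue a2 found by the shooting method
sol_P, a2 = solve_P(n=1, m=1)
P, dP_dmu = sol_P(np.linspace(-1, 1, 256))  # evaluate P and dP/dmu on a mu grid
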
56 changes: 56 additions & 0 deletions nf2/evaluation/analytical_metrics.py
@@ -0,0 +1,56 @@
import numpy as np

from nf2.evaluation.analytical_solution import get_analytic_b_field
from nf2.evaluation.unpack import load_cube
from nf2.potential.potential_field import get_potential
from nf2.train.metric import vector_norm, curl, divergence

base_path = '/gpfs/gpfs0/robert.jarolim/nf2/analytical_caseW_v3'

# CASE 1
# B = get_analytic_b_field()
# CASE 2
B = get_analytic_b_field(n = 1, m = 1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])
b = load_cube(f'{base_path}/extrapolation_result.nf2')

# for CASE 2 crop central 64^3
B = B[8:-8, 8:-8, :64]
b = b[8:-8, 8:-8, :64]

c_vec = np.sum((B * b).sum(-1)) / np.sqrt((B ** 2).sum(-1).sum() * (b ** 2).sum(-1).sum())
M = np.prod(B.shape[:-1])
c_cs = 1 / M * np.sum((B * b).sum(-1) / vector_norm(B) / vector_norm(b))

E_n = vector_norm(b - B).sum() / vector_norm(B).sum()

E_m = 1 / M * (vector_norm(b - B) / vector_norm(B)).sum()

eps = (vector_norm(b) ** 2).sum() / (vector_norm(B) ** 2).sum()

B_potential = get_potential(B[:, :, 0, 2], 64)

eps_p = (vector_norm(b) ** 2).sum() / (vector_norm(B_potential) ** 2).sum()
eps_p_ll = (vector_norm(B) ** 2).sum() / (vector_norm(B_potential) ** 2).sum()

j = curl(b)
sig_J = (vector_norm(np.cross(j, b, -1)) / vector_norm(b)).sum() / vector_norm(j).sum()

L1 = (vector_norm(np.cross(j, b, -1)) ** 2 / vector_norm(b) ** 2).mean()
L2 = (divergence(b) ** 2).mean()

L1_B = (vector_norm(np.cross(curl(B), B, -1)) ** 2 / vector_norm(B) ** 2).mean()
L2_B = (divergence(B) ** 2).mean()

with open(f'{base_path}/evaluation.txt', 'w') as f:
print('c_vec', c_vec, file=f)
print('c_cs', c_cs, file=f)
print('E_n', 1 - E_n, file=f)
print('E_m', 1 - E_m, file=f)
print('eps', eps, file=f)
print('eps_P', eps_p, file=f)
print('eps_P_LL', eps_p_ll, file=f)
print('sig_J * 1e2', sig_J * 1e2, file=f)
print('L1', L1, file=f)
print('L2', L2, file=f)
print('L1_B', L1_B, file=f)
print('L2_B', L2_B, file=f)
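
For reference (not part of the commit), the quantities computed above are the standard comparison metrics of Schrijver et al. (2006) between the analytic field B and the extrapolation b:

\[
C_{\mathrm{vec}} = \frac{\sum_i \mathbf{B}_i \cdot \mathbf{b}_i}{\sqrt{\sum_i |\mathbf{B}_i|^2 \, \sum_i |\mathbf{b}_i|^2}}, \qquad
C_{\mathrm{CS}} = \frac{1}{M} \sum_i \frac{\mathbf{B}_i \cdot \mathbf{b}_i}{|\mathbf{B}_i|\,|\mathbf{b}_i|},
\]
\[
E_n = \frac{\sum_i |\mathbf{b}_i - \mathbf{B}_i|}{\sum_i |\mathbf{B}_i|}, \qquad
E_m = \frac{1}{M} \sum_i \frac{|\mathbf{b}_i - \mathbf{B}_i|}{|\mathbf{B}_i|}, \qquad
\epsilon = \frac{\sum_i |\mathbf{b}_i|^2}{\sum_i |\mathbf{B}_i|^2},
\]

where M is the number of grid points; the script reports 1 - E_n and 1 - E_m. The current-weighted sine metric sig_J follows Wheatland et al. (2000):

\[
\sigma_J = \frac{\sum_i |\mathbf{J}_i \times \mathbf{b}_i| / |\mathbf{b}_i|}{\sum_i |\mathbf{J}_i|}.
\]
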
37 changes: 0 additions & 37 deletions nf2/evaluation/evaluate_analytical.py

This file was deleted.

69 changes: 37 additions & 32 deletions nf2/train/analytic_extrapolate.py
@@ -2,9 +2,7 @@
import json

import numpy as np
from matplotlib.colors import Normalize

from nf2.data.dataset import BoundaryDataset
from nf2.evaluation.analytical_solution import get_analytic_b_field
from nf2.train.trainer import NF2Trainer

@@ -25,7 +23,7 @@

# data parameters
spatial_norm = 64.
height = 64
height = 72 # 64
b_norm = 300
# model parameters
dim = args.dim
@@ -42,7 +40,13 @@

base_path = args.base_path

hmi_cube = get_analytic_b_field(n = 1, m = 1, l=0.3, psi=np.pi / 4)
# CASE 1
# hmi_cube = get_analytic_b_field(n = 1, m = 1, l=0.3, psi=np.pi /4)


# CASE 2
hmi_cube = get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])

error_cube = np.zeros_like(hmi_cube)

# init trainer
@@ -52,33 +56,34 @@
decay_epochs=decay_epochs, num_workers=args.num_workers, meta_path=args.meta_path,
use_vector_potential=args.use_vector_potential)

coords = [
# z
np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], :1], -1).reshape((-1, 3)),
np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], hmi_cube.shape[2] - 1:hmi_cube.shape[2]],
-1).reshape((-1, 3)),
# y
np.stack(np.mgrid[:hmi_cube.shape[0], :1, :hmi_cube.shape[2]], -1).reshape((-1, 3)),
np.stack(np.mgrid[:hmi_cube.shape[0], hmi_cube.shape[1] - 1:hmi_cube.shape[1], :hmi_cube.shape[2]],
-1).reshape((-1, 3)),
# x
np.stack(np.mgrid[:1, :hmi_cube.shape[1], :hmi_cube.shape[2]], -1).reshape((-1, 3)),
np.stack(np.mgrid[hmi_cube.shape[0] - 1:hmi_cube.shape[0], :hmi_cube.shape[1], :hmi_cube.shape[2]],
-1).reshape((-1, 3)),
]
values = [hmi_cube[:, :, :1].reshape((-1, 3)), hmi_cube[:, :, -1:].reshape((-1, 3)),
hmi_cube[:, :1, :].reshape((-1, 3)), hmi_cube[:, -1:, :].reshape((-1, 3)),
hmi_cube[:1, :, :].reshape((-1, 3)), hmi_cube[-1:, :, :].reshape((-1, 3)), ]

coords = np.concatenate(coords).astype(np.float32)
values = np.concatenate(values).astype(np.float32)
err = np.zeros_like(values).astype(np.float32)

# normalize B field
values = Normalize(-b_norm, b_norm, clip=False)(values) * 2 - 1
values = np.array(values)

boundary_ds = BoundaryDataset(coords, values, err, spatial_norm)
# coords = [
# # z
# np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], :1], -1).reshape((-1, 3)),
# np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], hmi_cube.shape[2] - 1:hmi_cube.shape[2]],
# -1).reshape((-1, 3)),
# # y
# np.stack(np.mgrid[:hmi_cube.shape[0], :1, :hmi_cube.shape[2]], -1).reshape((-1, 3)),
# np.stack(np.mgrid[:hmi_cube.shape[0], hmi_cube.shape[1] - 1:hmi_cube.shape[1], :hmi_cube.shape[2]],
# -1).reshape((-1, 3)),
# # x
# np.stack(np.mgrid[:1, :hmi_cube.shape[1], :hmi_cube.shape[2]], -1).reshape((-1, 3)),
# np.stack(np.mgrid[hmi_cube.shape[0] - 1:hmi_cube.shape[0], :hmi_cube.shape[1], :hmi_cube.shape[2]],
# -1).reshape((-1, 3)),
# ]
# values = [hmi_cube[:, :, :1].reshape((-1, 3)), hmi_cube[:, :, -1:].reshape((-1, 3)),
# hmi_cube[:, :1, :].reshape((-1, 3)), hmi_cube[:, -1:, :].reshape((-1, 3)),
# hmi_cube[:1, :, :].reshape((-1, 3)), hmi_cube[-1:, :, :].reshape((-1, 3)), ]
#
# coords = np.concatenate(coords).astype(np.float32)
# values = np.concatenate(values).astype(np.float32)
# err = np.zeros_like(values).astype(np.float32)
#
# # normalize B field
# values = Normalize(-b_norm, b_norm, clip=False)(values) * 2 - 1
# values = np.array(values)
#
# boundary_ds = BoundaryDataset(coords, values, err, spatial_norm)
#
# trainer.boundary_ds = boundary_ds

trainer.boundary_ds = boundary_ds
trainer.train(epochs, batch_size, n_samples_epoch, log_interval, validation_interval)
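
The explicit boundary construction is now retained only as comments. For reference, a self-contained sketch (stand-in data, not the repository's trainer) of the bottom-boundary extraction and of the scaling that the removed matplotlib Normalize call performed, which reduces to a plain division by b_norm:

import numpy as np

b_norm = 300
# stand-in field cube, only to make the sketch runnable
hmi_cube = (np.random.rand(80, 80, 72, 3).astype(np.float32) * 2 - 1) * b_norm

# bottom boundary (z = 0) as coordinate/value pairs, mirroring the commented-out block above
coords = np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], :1], -1).reshape((-1, 3)).astype(np.float32)
values = hmi_cube[:, :, :1].reshape((-1, 3)).astype(np.float32)

# Normalize(-b_norm, b_norm)(values) * 2 - 1  ==  (values + b_norm) / (2 * b_norm) * 2 - 1  ==  values / b_norm
values_normalized = values / b_norm
print(coords.shape, values_normalized.shape)  # (6400, 3) (6400, 3)
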
