Merge pull request #2 from RobertJaro/analytic-solution
merge analytical magnetic field simulation
Showing 4 changed files with 270 additions and 0 deletions.
@@ -127,3 +127,6 @@ dmypy.json

# Pyre type checker
.pyre/
/results/
/data/
/scripts/
@@ -0,0 +1,122 @@
import numpy as np
from scipy.integrate import solve_bvp


def _differential_equation(mu, u, n, a2):
    """
    The differential equation to solve for P.

    :param mu: cos(theta)
    :param u: P function and its derivative
    :param n: variable according to Low & Lou (1989)
    :param a2: eigenvalue
    """
    P, dP = u
    dP_dmu = dP
    d2P_dmu2 = -(n * (n + 1) * P + a2 * (1 + n) / n * P ** (1 + 2 / n)) / (1 - mu ** 2 + 1e-8)
    return (dP_dmu, d2P_dmu2)


def get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi / 4, resolution=64, bounds=[-1, 1, -1, 1, 0, 2]):
    """
    Calculate the analytic NLFF field from Low & Lou (1989).

    :param n: variable according to Low & Lou (1989); only n=1 is supported
    :param m: used for generating a proper initial condition
    :param l: depth below the photosphere
    :param psi: angle of the magnetic field relative to the dipole axis
    :param resolution: spatial resolution of the magnetic field in pixels (int or [nx, ny, nz])
    :param bounds: dimensions of the volume (x_start, x_end, y_start, y_end, z_start, z_end)
    :return: magnetic field B with shape (x, y, z, 3)
    """
    sol_P, a2 = solve_P(n, m)

    resolution = [resolution] * 3 if not isinstance(resolution, list) else resolution
    coords = np.stack(np.meshgrid(np.linspace(bounds[0], bounds[1], resolution[1], dtype=np.float32),
                                  np.linspace(bounds[2], bounds[3], resolution[0], dtype=np.float32),
                                  np.linspace(bounds[4], bounds[5], resolution[2], dtype=np.float32)), -1).transpose(
        [1, 0, 2, 3])

    # rotate the frame by psi and shift the source to depth l
    x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
    X = x * np.cos(psi) - (z + l) * np.sin(psi)
    Y = y
    Z = x * np.sin(psi) + (z + l) * np.cos(psi)

    # to spherical coordinates
    xy = X ** 2 + Y ** 2
    r = np.sqrt(xy + Z ** 2)
    theta = np.arctan2(np.sqrt(xy), Z)
    phi = np.arctan2(Y, X)

    mu = np.cos(theta)

    # Low & Lou (1989) field functions A and Q
    P, dP_dmu = sol_P(mu)
    A = P / r ** n
    dA_dtheta = -np.sin(theta) / (r ** n) * dP_dmu
    dA_dr = P * (-n * r ** (-n - 1))
    Q = np.sqrt(a2) * A * np.abs(A) ** (1 / n)

    # field components in spherical coordinates
    Br = (r ** 2 * np.sin(theta)) ** -1 * dA_dtheta
    Btheta = -(r * np.sin(theta)) ** -1 * dA_dr
    Bphi = (r * np.sin(theta)) ** -1 * Q

    # to Cartesian components of the rotated frame
    BX = Br * np.sin(theta) * np.cos(phi) + Btheta * np.cos(theta) * np.cos(phi) - Bphi * np.sin(phi)
    BY = Br * np.sin(theta) * np.sin(phi) + Btheta * np.cos(theta) * np.sin(phi) + Bphi * np.cos(phi)
    BZ = Br * np.cos(theta) - Btheta * np.sin(theta)

    # undo the rotation by psi
    Bx = BX * np.cos(psi) + BZ * np.sin(psi)
    By = BY
    Bz = -BX * np.sin(psi) + BZ * np.cos(psi)

    b_field = np.real(np.stack([Bx, By, Bz], -1))
    return b_field


def solve_P(n, m):
    """
    Solve the differential equation for P from Low & Lou (1989) as a boundary value problem.

    :param n: variable according to Low & Lou (1989); only n=1 is supported
    :param m: used for generating a proper initial condition
    :return: interpolated solution for (P, dP/dmu) and the eigenvalue a2
    """

    def f(x, y, p):
        a2 = p[0]
        d2P_dmu2 = -(n * (n + 1) * y[0] + a2 * (1 + n) / n * y[0] ** (1 + 2 / n)) / (1 - x ** 2 + 1e-6)
        return [y[1], d2P_dmu2]

    def f_boundary(Pa, Pb, p):
        # P(-1) = P(1) = 0 and dP/dmu(-1) = 10 (fixes the normalization)
        return np.array([Pa[0] - 0, Pb[0] - 0, Pa[1] - 10])

    mu = np.linspace(-1, 1, num=256)

    # initial guess for P depending on m
    if m % 2 == 0:
        init = np.cos(mu * (m + 1) * np.pi / 2)
    else:
        init = np.sin(mu * (m + 1) * np.pi / 2)

    dinit = 10 * np.ones_like(init)
    initial = np.stack([init, dinit])

    @np.vectorize
    def shooting(a2_init):
        eval = solve_bvp(f, f_boundary, x=mu, y=initial, p=[a2_init], verbose=0, tol=1e-6)
        if not eval.success:
            return None
        return eval

    # use shooting over a range of a2 to find the eigenvalues
    evals = shooting(np.linspace(0, 10, 100, dtype=np.float32))
    evals = [e for e in evals if e is not None]

    eigenvalues = np.array([e.p for e in evals])
    eigenvalues = sorted(set(np.round(eigenvalues, 4).reshape((-1,))))

    # get final solution for the largest eigenvalue
    eval = shooting([eigenvalues[-1]])[0]

    return eval.sol, eval.p[0]
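For orientation, a minimal usage sketch of the routine added above (not part of the commit); the import path nf2.evaluation.analytical_field and the CASE 2 parameters are taken from the scripts below:

# Minimal usage sketch: build the CASE 2 reference cube and inspect it.
import numpy as np
from nf2.evaluation.analytical_field import get_analytic_b_field

B = get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])
print(B.shape)             # (80, 80, 72, 3) -> (x, y, z, [Bx, By, Bz])
bottom_bz = B[:, :, 0, 2]  # photospheric Bz slice used as boundary condition
print(bottom_bz.shape)     # (80, 80)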
@@ -0,0 +1,56 @@
import numpy as np

from nf2.evaluation.analytical_field import get_analytic_b_field
from nf2.evaluation.unpack import load_cube
from nf2.potential.potential_field import get_potential
from nf2.train.metric import vector_norm, curl, divergence

base_path = '/gpfs/gpfs0/robert.jarolim/nf2/analytical_caseW_v3'

# CASE 1
# B = get_analytic_b_field()
# CASE 2
B = get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])
b = load_cube(f'{base_path}/extrapolation_result.nf2')

# for CASE 2 crop central 64^3
B = B[8:-8, 8:-8, :64]
b = b[8:-8, 8:-8, :64]

# vector correlation and Cauchy-Schwarz metrics
c_vec = np.sum((B * b).sum(-1)) / np.sqrt((B ** 2).sum(-1).sum() * (b ** 2).sum(-1).sum())
M = np.prod(B.shape[:-1])
c_cs = 1 / M * np.sum((B * b).sum(-1) / vector_norm(B) / vector_norm(b))

# normalized and mean vector errors
E_n = vector_norm(b - B).sum() / vector_norm(B).sum()
E_m = 1 / M * (vector_norm(b - B) / vector_norm(B)).sum()

# total magnetic energy relative to the reference field
eps = (vector_norm(b) ** 2).sum() / (vector_norm(B) ** 2).sum()

# energy relative to the potential field of the reference bottom boundary
B_potential = get_potential(B[:, :, 0, 2], 64)

eps_p = (vector_norm(b) ** 2).sum() / (vector_norm(B_potential) ** 2).sum()
eps_p_ll = (vector_norm(B) ** 2).sum() / (vector_norm(B_potential) ** 2).sum()

# current-weighted sine of the angle between J and B (force-freeness)
j = curl(b)
sig_J = (vector_norm(np.cross(j, b, -1)) / vector_norm(b)).sum() / vector_norm(j).sum()

# force-free and divergence residuals of the extrapolation ...
L1 = (vector_norm(np.cross(j, b, -1)) ** 2 / vector_norm(b) ** 2).mean()
L2 = (divergence(b) ** 2).mean()

# ... and of the analytic reference field
L1_B = (vector_norm(np.cross(curl(B), B, -1)) ** 2 / vector_norm(B) ** 2).mean()
L2_B = (divergence(B) ** 2).mean()

with open(f'{base_path}/evaluation.txt', 'w') as f:
    print('c_vec', c_vec, file=f)
    print('c_cs', c_cs, file=f)
    print('E_n', 1 - E_n, file=f)
    print('E_m', 1 - E_m, file=f)
    print('eps', eps, file=f)
    print('eps_P', eps_p, file=f)
    print('eps_P_LL', eps_p_ll, file=f)
    print('sig_J * 1e2', sig_J * 1e2, file=f)
    print('L1', L1, file=f)
    print('L2', L2, file=f)
    print('L1_B', L1_B, file=f)
    print('L2_B', L2_B, file=f)
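The quantities written to evaluation.txt are standard field-comparison metrics (vector correlation, Cauchy-Schwarz correlation, normalized and mean vector errors, energy ratios) plus force-free and divergence residuals. Below is a self-contained sketch of the core definitions, using a local stand-in for nf2.train.metric.vector_norm, which the usage above suggests is the Euclidean norm over the last (component) axis; identical fields should give c_vec = c_cs = 1 and E_n = E_m = 0:

# Sketch of the comparison metrics with a local vector_norm stand-in.
import numpy as np

def vector_norm(v):
    # Euclidean norm over the component (last) axis
    return np.sqrt((v ** 2).sum(-1))

rng = np.random.default_rng(0)
B = rng.normal(size=(16, 16, 16, 3))  # reference field
b = B.copy()                          # "reconstruction"; identical here by construction

M = np.prod(B.shape[:-1])
c_vec = np.sum((B * b).sum(-1)) / np.sqrt((B ** 2).sum(-1).sum() * (b ** 2).sum(-1).sum())
c_cs = 1 / M * np.sum((B * b).sum(-1) / (vector_norm(B) * vector_norm(b)))
E_n = vector_norm(b - B).sum() / vector_norm(B).sum()
E_m = 1 / M * (vector_norm(b - B) / vector_norm(B)).sum()

print(c_vec, c_cs, E_n, E_m)  # -> approximately 1, 1, 0, 0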
@@ -0,0 +1,89 @@
import argparse
import json

import numpy as np

from nf2.evaluation.analytical_field import get_analytic_b_field
from nf2.train.trainer import NF2Trainer

parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True,
                    help='config file for the simulation')
parser.add_argument('--num_workers', type=int, required=False, default=4)
parser.add_argument('--meta_path', type=str, required=False, default=None)
parser.add_argument('--positional_encoding', action='store_true')
parser.add_argument('--use_vector_potential', action='store_true')
parser.add_argument('--n_samples_epoch', type=int, required=False, default=None)
args = parser.parse_args()

with open(args.config) as config:
    info = json.load(config)
    for key, value in info.items():
        args.__dict__[key] = value

# data parameters
spatial_norm = 64.
height = 72  # 64
b_norm = 300
# model parameters
dim = args.dim
# training parameters
lambda_div = args.lambda_div
lambda_ff = args.lambda_ff
epochs = args.epochs
decay_epochs = args.decay_epochs
batch_size = int(args.batch_size)
n_samples_epoch = int(batch_size * 100) if args.n_samples_epoch is None else args.n_samples_epoch
log_interval = args.log_interval
validation_interval = args.validation_interval
potential = args.potential

base_path = args.base_path

# CASE 1
# hmi_cube = get_analytic_b_field(n = 1, m = 1, l=0.3, psi=np.pi /4)

# CASE 2
hmi_cube = get_analytic_b_field(n=1, m=1, l=0.3, psi=np.pi * 0.15, resolution=[80, 80, 72])

error_cube = np.zeros_like(hmi_cube)

# init trainer
trainer = NF2Trainer(base_path, hmi_cube[:, :, 0], error_cube[:, :, 0], height, spatial_norm, b_norm, dim,
                     positional_encoding=args.positional_encoding,
                     potential_boundary=potential, lambda_div=lambda_div, lambda_ff=lambda_ff,
                     decay_epochs=decay_epochs, num_workers=args.num_workers, meta_path=args.meta_path,
                     use_vector_potential=args.use_vector_potential)

# coords = [
#     # z
#     np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], :1], -1).reshape((-1, 3)),
#     np.stack(np.mgrid[:hmi_cube.shape[0], :hmi_cube.shape[1], hmi_cube.shape[2] - 1:hmi_cube.shape[2]],
#              -1).reshape((-1, 3)),
#     # y
#     np.stack(np.mgrid[:hmi_cube.shape[0], :1, :hmi_cube.shape[2]], -1).reshape((-1, 3)),
#     np.stack(np.mgrid[:hmi_cube.shape[0], hmi_cube.shape[1] - 1:hmi_cube.shape[1], :hmi_cube.shape[2]],
#              -1).reshape((-1, 3)),
#     # x
#     np.stack(np.mgrid[:1, :hmi_cube.shape[1], :hmi_cube.shape[2]], -1).reshape((-1, 3)),
#     np.stack(np.mgrid[hmi_cube.shape[0] - 1:hmi_cube.shape[0], :hmi_cube.shape[1], :hmi_cube.shape[2]],
#              -1).reshape((-1, 3)),
# ]
# values = [hmi_cube[:, :, :1].reshape((-1, 3)), hmi_cube[:, :, -1:].reshape((-1, 3)),
#           hmi_cube[:, :1, :].reshape((-1, 3)), hmi_cube[:, -1:, :].reshape((-1, 3)),
#           hmi_cube[:1, :, :].reshape((-1, 3)), hmi_cube[-1:, :, :].reshape((-1, 3)), ]
#
# coords = np.concatenate(coords).astype(np.float32)
# values = np.concatenate(values).astype(np.float32)
# err = np.zeros_like(values).astype(np.float32)
#
# # normalize B field
# values = Normalize(-b_norm, b_norm, clip=False)(values) * 2 - 1
# values = np.array(values)
#
# boundary_ds = BoundaryDataset(coords, values, err, spatial_norm)
#
# trainer.boundary_ds = boundary_ds

trainer.train(epochs, batch_size, n_samples_epoch, log_interval, validation_interval)
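The script reads all remaining hyperparameters from the JSON file passed via --config and copies them onto args, so the config has to provide at least the keys accessed above (base_path, dim, lambda_div, lambda_ff, epochs, decay_epochs, batch_size, log_interval, validation_interval, potential). A hypothetical config written from Python; the values and the file name are illustrative placeholders, not the settings of the actual runs:

# Hypothetical config for the training script above (placeholder values).
import json

config = {
    'base_path': '/path/to/analytical_run',  # output directory for the run
    'dim': 256,                  # network dimension passed to NF2Trainer
    'lambda_div': 0.1,           # divergence loss weight (lambda_div)
    'lambda_ff': 0.1,            # force-free loss weight (lambda_ff)
    'epochs': 400,
    'decay_epochs': 400,
    'batch_size': 10000,         # cast to int by the script
    'log_interval': 100,
    'validation_interval': 100,
    'potential': True,           # passed as potential_boundary to NF2Trainer
}

with open('analytic_case2.json', 'w') as f:
    json.dump(config, f, indent=2)

# run: python <training script> --config analytic_case2.json --positional_encoding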