Merge remote-tracking branch 'sandia/devel' into njr/bifi-boosting
nrummel committed Jul 16, 2024
2 parents 9986448 + 6a66499 commit 74cdb00
Showing 20 changed files with 85 additions and 61 deletions.
@@ -45,7 +45,7 @@ jobs:
- name: Setup PyApprox
shell: bash -l {0}
run: |
-          pip install -e .
+          python -m pip install -e .
- name: Test PyApprox
shell: bash -l {0}
run: |
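This and the following workflow hunks switch from the bare `pip` executable to `python -m pip`, which guarantees the install targets the interpreter that runs the job rather than whichever pip is first on the PATH. A minimal sketch of the same guarantee from inside Python (assumes the current directory contains an installable project):

```python
# Sketch: install through the interpreter currently running, which is the
# guarantee `python -m pip` gives on the command line.
import subprocess
import sys

# sys.executable is the running interpreter, so the editable install below
# lands in that interpreter's site-packages, never a stray pip's.
subprocess.run([sys.executable, "-m", "pip", "install", "-e", "."], check=True)
```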
@@ -37,7 +37,7 @@ jobs:
- name: Setup PyApprox Documentation
shell: bash -l {0}
run: |
-          pip install -e .[docs]
+          python -m pip install -e .[docs]
- name: Create PyApprox Documentation
shell: bash -l {0}
run: |
4 changes: 2 additions & 2 deletions .github/workflows/continuous-integration-workflow-docs.yml
@@ -43,11 +43,11 @@ jobs:
- name: Setup PyApprox
shell: bash -l {0}
run: |
-          pip install -e . --no-build-isolation
+          python -m pip install -e . --no-build-isolation
- name: Setup PyApprox Documentation
shell: bash -l {0}
run: |
-          pip install -e .[docs]
+          python -m pip install -e .[docs]
- name: Create PyApprox Documentation
shell: bash -l {0}
run: |
2 changes: 1 addition & 1 deletion .github/workflows/continuous-integration-workflow-pip.yml
@@ -40,7 +40,7 @@ jobs:
python -m pip install --upgrade pip
- name: Setup PyApprox
run: |
-          pip install -e .
+          python -m pip install -e .
- name: Test PyApprox
run: |
pytest -s --cov-report term --cov=pyapprox
9 changes: 5 additions & 4 deletions environment.yml
@@ -1,10 +1,11 @@
name: pyapprox-base
channels:
-  - defaults
- pytorch
- conda-forge
+  - defaults
dependencies:
-  - python>=3.8
-  - numpy>=1.20
+  - python>=3.9
+  - numpy>=1.20,<=1.26.4
- matplotlib
- scipy
- jupyter
@@ -17,7 +18,7 @@ dependencies:
- Cython
- sympy
- networkx
-  - pytorch
+  - pytorch<=2.2.0
- numba
# make html in docs folder
- sphinx
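The new upper bounds mirror breakage recorded elsewhere in this commit: numpy is capped at 1.26.4 (the last 1.x release) because NumPy 2.0 removes aliases the codebase used (np.Inf, np.math, the Cython int_t typedef), and pytorch is capped at 2.2.0 because of the torch.kron failure noted in pyapprox/pde/autopde/mesh.py below. A hedged sketch of a runtime guard enforcing the same pins (the packaging dependency is an assumption, not part of this commit):

```python
# Sketch: fail fast if the environment drifts past the pinned versions.
from packaging.version import Version
import numpy
import torch

assert Version(numpy.__version__) <= Version("1.26.4"), "NumPy 2.x unsupported"
assert Version(torch.__version__) <= Version("2.2.0"), "see torch.kron note"
```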
2 changes: 1 addition & 1 deletion pyapprox/analysis/active_subspace.py
@@ -436,7 +436,7 @@ def sort_2d_vertices_by_polar_angle(vertices):
# sort by polar angle
sorted_vertices = np.array(
sorted(vertices.T,
-               key=lambda p: np.math.atan2(p[1]-cent[1], p[0]-cent[0]))).T
+               key=lambda p: np.arctan2(p[1]-cent[1], p[0]-cent[0]))).T
return sorted_vertices


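np.math was an undocumented re-export of the standard-library math module; it was deprecated and is removed in NumPy 2.0, so the scalar np.math.atan2 call gives way to the vectorizable np.arctan2. A minimal sketch of the polar-angle sort, assuming cent is the vertex centroid as the surrounding code suggests:

```python
# Sketch: sort the columns of a 2 x n vertex array counter-clockwise
# around the centroid using np.arctan2 (works on NumPy 1.x and 2.x).
import numpy as np

vertices = np.array([[0., 1., 1., 0.],
                     [0., 0., 1., 1.]])          # 2 x nvertices
cent = vertices.mean(axis=1)                     # assumed centroid
sorted_vertices = np.array(
    sorted(vertices.T,
           key=lambda p: np.arctan2(p[1]-cent[1], p[0]-cent[0]))).T
print(sorted_vertices)
```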
2 changes: 1 addition & 1 deletion pyapprox/analysis/tests/test_active_subspace.py
@@ -202,7 +202,7 @@ def test_moments_of_active_subspace(self):
(3*a**4+3*b**4+10*b**2*c**2+3*c**4+10*a**2*(b**2+c**2))/15.])
moments = moments[sorted_idx]
# ignore dummy values until I compute them analytically
-        II = np.where(true_moments != np.Inf)[0]
+        II = np.where(true_moments != np.inf)[0]
assert np.allclose(moments[II], true_moments[II])

def test_moments_of_active_subspace_II(self):
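np.Inf is another alias dropped by NumPy 2.0; np.inf is the canonical spelling on every NumPy version. A one-line sketch of the finite-entry filter the test relies on:

```python
# Sketch: keep only the entries with known (finite) true moments.
import numpy as np

true_moments = np.array([1.0, np.inf, 2.0])      # inf marks dummy values
II = np.where(true_moments != np.inf)[0]
print(II)                                        # [0 2]
```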
1 change: 0 additions & 1 deletion pyapprox/benchmarks/pde_benchmarks.py
@@ -319,7 +319,6 @@ def _setup_advection_diffusion_benchmark(
kle_mean_field=0.):
variable = IndependentMarginalsVariable([stats.norm(0, 1)]*nvars)
orders = np.asarray(orders, dtype=int)
-
domain_bounds = [0, 1, 0, 1]
mesh = CartesianProductCollocationMesh(domain_bounds, orders)
# bndry_conds = [
15 changes: 7 additions & 8 deletions pyapprox/cython/barycentric_interpolation.pyx
@@ -5,7 +5,6 @@ import numpy as np


ctypedef np.double_t double_t
-ctypedef np.int_t int_t
ctypedef np.int64_t int64_t


@@ -44,8 +43,8 @@ cpdef compute_barycentric_weights_1d_pyx(np.ndarray[double_t] samples, double C_
@cython.wraparound(False) # Deactivate negative indexing.
cpdef multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
double[:,:] x, double[:,:] fn_vals, np.ndarray[int64_t] active_dims,
-        int_t[:,:] active_abscissa_indices_1d, int_t[:] num_abscissa_1d,
-        int_t[:] num_active_abscissa_1d, int_t[:] shifts,
+        int64_t[:,:] active_abscissa_indices_1d, int64_t[:] num_abscissa_1d,
+        int64_t[:] num_active_abscissa_1d, int64_t[:] shifts,
double[:,:] abscissa_and_weights):

# active_dims is type unstable! Switches from int64 to int32
@@ -61,7 +60,7 @@ cpdef multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
Py_ssize_t num_act_dims = active_dims.shape[0]

Py_ssize_t max_num_abscissa_1d = abscissa_and_weights.shape[0]//2
-        int_t[:] multi_index = np.empty((num_act_dims), dtype=np.int_)
+        int64_t[:] multi_index = np.empty((num_act_dims), dtype=np.int_)

Py_ssize_t num_qoi = fn_vals.shape[1]

@@ -71,8 +70,8 @@ cpdef multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
# Allocate persistent memory. Each point will fill in a varying amount
# of entries. We use a view of this memory to stop reallocation for each
# data point
-    cdef int_t[:] act_dims_pt_persistent = np.empty((num_act_dims),dtype=np.int_)
-    cdef int_t[:] act_dim_indices_pt_persistent = np.empty(
+    cdef int64_t[:] act_dims_pt_persistent = np.empty((num_act_dims),dtype=np.int_)
+    cdef int64_t[:] act_dim_indices_pt_persistent = np.empty(
(num_act_dims),dtype=np.int_)

cdef:
@@ -199,7 +198,7 @@ cpdef multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
@cython.wraparound(False) # Deactivate negative indexing.
cpdef tensor_product_lagrange_interpolation_pyx(
double[:, :] x, double[:, :] fn_vals, double[:, :, :] basis_vals_1d,
-        int_t[:, :] active_indices, int_t[:] active_vars):
+        int64_t[:, :] active_indices, int64_t[:] active_vars):

cdef Py_ssize_t ii, jj, dd, kk
cdef Py_ssize_t nindices = active_indices.shape[1]
Expand All @@ -218,4 +217,4 @@ cpdef tensor_product_lagrange_interpolation_pyx(
for kk in range(nqoi):
values_view[ii, kk] += basis_val*fn_vals[jj, kk]
return values


4 changes: 2 additions & 2 deletions pyapprox/cython/utilities.pyx
@@ -5,7 +5,6 @@ cimport cython

fused_type = cython.fused_type(cython.numeric, np.float64_t)

-ctypedef np.int_t int_t
ctypedef np.int64_t int64_t

@cython.cdivision(True) # Deactivate division by zero checking
@@ -55,6 +54,7 @@ cdef sub2ind_pyx(int [:] sizes, int [:] multi_index):
shift *= sizes[ii]
return scalar_index

+
@cython.cdivision(True) # Deactivate division by zero checking
@cython.boundscheck(False) # Deactivate bounds checking
@cython.wraparound(False) # Deactivate negative indexing.
@@ -225,7 +225,7 @@ cpdef outer_product_pyx(input_sets, fused_type element):
@cython.cdivision(True) # Deactivate division by zero checking
@cython.boundscheck(False) # Deactivate bounds checking
@cython.wraparound(False) # Deactivate negative indexing.
-def halton_sequence_pyx(int64_t[:] primes, int_t index1, int_t index2):
+def halton_sequence_pyx(int64_t[:] primes, int64_t index1, int64_t index2):
cdef:
Py_ssize_t ii, kk
int64_t summand
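Both Cython modules drop the local int_t typedef (backed by np.int_, i.e. the platform C long) in favor of fixed-width int64_t. Under NumPy 1.x, np.int_ is 32-bit on 64-bit Windows, so memoryviews typed against it do not match int64 arrays there, and NumPy 2.0 tightens these integer aliases further. Note that the allocations above still pass dtype=np.int_, which would not match an int64_t view on such platforms. A sketch of the width difference:

```python
# Sketch: np.int_ follows the platform C long; np.int64 is always 8 bytes.
import numpy as np

fixed = np.arange(4, dtype=np.int64)
platform_dependent = np.arange(4, dtype=np.int_)
print(fixed.itemsize)               # 8 everywhere
print(platform_dependent.itemsize)  # 4 on 64-bit Windows (NumPy 1.x), else 8
```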
4 changes: 3 additions & 1 deletion pyapprox/expdesign/tests/test_linear_oed.py
@@ -878,7 +878,7 @@ def help_check_michaelis_menten_model_minimax_optimal_design(
with increasing theta_2.
"""
iprint = 0
-    num_design_pts = 30
+    num_design_pts = 20
design_samples = np.linspace(1e-3, 1, num_design_pts)
# pred_samples = design_samples
pred_samples = np.linspace(0, 1, num_design_pts+10)
@@ -923,6 +923,8 @@ def noise_multiplier(p, x): return link_function(
constraint = minimax_opt_problem.setup_minimax_nonlinear_constraints(
parameter_samples[:, ii:ii+1], design_samples[None, :])
constraint_vals_II.append(z0[0]-constraint[0].fun(z0))
+    # print(constraint_vals_I)
+    # print(constraint_vals_II)
assert np.allclose(constraint_vals_I, constraint_vals_II)

opts = copy.deepcopy(opts)
20 changes: 17 additions & 3 deletions pyapprox/multifidelity/factory.py
@@ -555,7 +555,7 @@ def __call__(self, est_covariance, est):
if self._stat_type != "mean" and isinstance(
est._stat, MultiOutputMeanAndVariance):
return (
-                est_covariance[est.nqoi+self._qoi_idx,
+                est_covariance[est._nqoi+self._qoi_idx,
est._nqoi+self._qoi_idx])
elif (isinstance(
est._stat, (MultiOutputVariance, MultiOutputMean)) or
@@ -570,7 +570,7 @@ def __repr__(self):

def compute_variance_reductions(optimized_estimators,
criteria=ComparisonCriteria("det"),
-                                nhf_samples=None):
+                                nhf_samples=None, pilot_cost=None):
"""
Compute the variance reduction (relative to single model MC) for a
list of optimized estimators.
@@ -597,6 +597,11 @@ def compute_variance_reductions(optimized_estimators,
evaluations that produce an estimator cost equal to the optimized
target cost of the estimator is used. Usually, nhf_samples should be
set to None.
+    pilot_cost : float
+        The cost of running the pilot study. If not None, this is used to
+        determine the number of high-fidelity samples used by a
+        single-fidelity MC study that does not need a pilot.
"""
var_red, est_criterias, sf_criterias = [], [], []
optimized_estimators = optimized_estimators.copy()
@@ -606,7 +611,16 @@ def compute_variance_reductions(optimized_estimators,
est_criteria = criteria(est._covariance_from_npartition_samples(
est._rounded_npartition_samples), est)
if nhf_samples is None:
-            nhf_samples = int(est._rounded_target_cost/est._costs[0])
+            if pilot_cost is None:
+                nhf_samples = int(est._rounded_target_cost/est._costs[0])
+            else:
+                nhf_samples = int(
+                    (est._rounded_target_cost+pilot_cost)/est._costs[0])
+        else:
+            if pilot_cost is not None:
+                msg = "pilot cost was specified even though nhf_samples was "
+                msg += "not None"
+                raise ValueError(msg)
sf_criteria = criteria(
est._stat.high_fidelity_estimator_covariance(
nhf_samples), est)
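The new pilot_cost branch credits the cost of the pilot study to the single-fidelity MC comparator: the multifidelity estimator needed a pilot to estimate model covariances, while plain MC would not, so the MC budget grows by that amount. A hedged sketch of the accounting with a hypothetical helper (equivalent_mc_samples is not part of the PyApprox API):

```python
# Sketch: high-fidelity MC samples affordable for a fair comparison.
def equivalent_mc_samples(target_cost, hf_sample_cost, pilot_cost=None):
    # Credit the pilot cost to the MC budget when one was required.
    if pilot_cost is not None:
        target_cost = target_cost + pilot_cost
    return int(target_cost / hf_sample_cost)

print(equivalent_mc_samples(1000.0, 10.0))                    # 100
print(equivalent_mc_samples(1000.0, 10.0, pilot_cost=200.0))  # 120
```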
43 changes: 29 additions & 14 deletions pyapprox/pde/autopde/mesh.py
@@ -5,10 +5,8 @@

from pyapprox.util.utilities import cartesian_product, outer_product
from pyapprox.surrogates.orthopoly.quadrature import gauss_jacobi_pts_wts_1D
-from pyapprox.variables.transforms import _map_hypercube_samples
from pyapprox.surrogates.interp.barycentric_interpolation import (
-    compute_barycentric_weights_1d, barycentric_interpolation_1d
-)
+    compute_barycentric_weights_1d, barycentric_interpolation_1d)
from pyapprox.util.visualization import plt, get_meshgrid_samples
from pyapprox.pde.autopde.mesh_transforms import ScaleAndTranslationTransform

@@ -23,16 +21,16 @@ def full_fun_axis_1(fill_val, xx, oned=True):

def chebyshev_derivative_matrix(order):
if order == 0:
-        pts = np.array([1], float)
-        derivative_matrix = np.array([0], float)
+        pts = np.array([1], dtype=float)
+        derivative_matrix = np.array([0], dtype=float)
else:
# this is reverse order used by matlab cheb function
pts = -np.cos(np.linspace(0., np.pi, order+1))
-        scalars = np.ones((order+1), float)
+        scalars = np.ones((order+1), dtype=float)
scalars[0] = 2.
scalars[order] = 2.
scalars[1:order+1:2] *= -1
-        derivative_matrix = np.empty((order+1, order+1), float)
+        derivative_matrix = np.empty((order+1, order+1), dtype=float)
for ii in range(order+1):
row_sum = 0.
for jj in range(order+1):
@@ -114,7 +112,7 @@ def fourier_basis(order, samples):
npts = (order+1)
h = 2*np.pi/npts
pts = h*np.arange(1, npts+1)
-    II = np.where(samples==2*np.pi)[0]
+    II = np.where(samples == 2*np.pi)[0]
samples[II] = 0
xx = samples[:, None]-pts[None, :]
vals = np.sin(np.pi*xx/h)/(2*np.pi/h*np.tan(xx/2))
@@ -223,7 +221,7 @@ def kronecker_product_2d(matrix1, matrix2):
assert matrix1.ndim == 2
block_num_rows = matrix1.shape[0]
matrix_num_rows = block_num_rows**2
-    matrix = np.empty((matrix_num_rows, matrix_num_rows), float)
+    matrix = np.empty((matrix_num_rows, matrix_num_rows), dtype=float)

# loop through blocks
start_col = 0
@@ -440,16 +438,33 @@ def _form_derivative_matrices(self):
compute_barycentric_weights_1d(xx) for xx in canonical_mesh_pts_1d]

if self.nphys_vars == 1:
-            canonical_deriv_mats = [canonical_deriv_mats_1d[0]]
+            canonical_deriv_mats = [
+                torch.as_tensor(
+                    canonical_deriv_mats_1d[0], dtype=torch.double)]
else:
# assumes that 2d-mesh_pts varies in x1 faster than x2,
# e.g. points are
# [[x11,x21],[x12,x21],[x13,x12],[x11,x22],[x12,x22],...]

+            # The following fails with PyTorch 2.3.0.
+            # I thought it may have been caused by converting numpy to tensor
+            # but this code suggests it is not that explicitly.
+            # What is confusing is this works in ipython as standalone code.
+            # For now setting setup to only use pytorch<=2.2
+            # mat1 = torch.eye(31, dtype=torch.double)
+            # mat2 = torch.ones((31, 31), dtype=torch.double)
+            # C = torch.kron(mat1, mat2)
+            # print("A", C.shape)
             canonical_deriv_mats = [
-                np.kron(np.eye(self._orders[1]+1), canonical_deriv_mats_1d[0]),
-                np.kron(canonical_deriv_mats_1d[1], np.eye(self._orders[0]+1))]
-            canonical_deriv_mats = [torch.as_tensor(mat, dtype=torch.double)
-                                    for mat in canonical_deriv_mats]
+                torch.kron(
+                    torch.eye(self._orders[1]+1, dtype=torch.double),
+                    torch.as_tensor(canonical_deriv_mats_1d[0],
+                                    dtype=torch.double)),
+                torch.kron(
+                    torch.as_tensor(canonical_deriv_mats_1d[1],
+                                    dtype=torch.double),
+                    torch.eye(self._orders[0]+1, dtype=torch.double))]

canonical_mesh_pts = cartesian_product(canonical_mesh_pts_1d)

return (canonical_mesh_pts_1d, canonical_deriv_mats_1d,
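The hunk above replaces the numpy-kron-then-convert pattern with torch.kron directly, and records the PyTorch 2.3.0 regression that motivated the pytorch<=2.2.0 pin. The construction itself is the standard tensor-product identity: with mesh points ordered so x1 varies fastest, the 2-D derivative operators are D_x1 = I kron D1 and D_x2 = D2 kron I. A sketch with stand-in matrices:

```python
# Sketch: building 2-D differentiation operators from 1-D ones via
# Kronecker products (D1, D2 stand in for the 1-D Chebyshev matrices).
import torch

n1, n2 = 3, 4
D1 = torch.rand(n1, n1, dtype=torch.double)
D2 = torch.rand(n2, n2, dtype=torch.double)
Dx1 = torch.kron(torch.eye(n2, dtype=torch.double), D1)  # d/dx1
Dx2 = torch.kron(D2, torch.eye(n1, dtype=torch.double))  # d/dx2
print(Dx1.shape, Dx2.shape)  # both (n1*n2, n1*n2)
```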
2 changes: 1 addition & 1 deletion pyapprox/pde/kle/_kle.py
@@ -300,7 +300,7 @@ def _compute_basis(self):
# is that we cannot use autograd on quantities used to construct K.
# but the need for this is unlikely
eig_vals, eig_vecs = eigh(
-            self._la_to_numpy(K), turbo=False,
+            self._la_to_numpy(K),  # turbo=False,
subset_by_index=(K.shape[0]-self._nterms, K.shape[0]-1))
eig_vals = self._la_atleast1d(eig_vals)
eig_vecs = self._la_atleast2d(eig_vecs)
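scipy.linalg.eigh deprecated and later removed the turbo keyword; subset_by_index already carries the selection of the nterms largest eigenpairs, so the argument is simply commented out. A sketch of the call on a stand-in covariance matrix:

```python
# Sketch: only the nterms largest eigenpairs of a symmetric matrix.
import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 50))
K = A @ A.T                                   # symmetric stand-in for K
nterms = 5
eig_vals, eig_vecs = eigh(
    K, subset_by_index=(K.shape[0]-nterms, K.shape[0]-1))
print(eig_vals.shape, eig_vecs.shape)         # (5,), (50, 5)
```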
@@ -887,7 +887,7 @@ def _check_multifidelity_gp(self, kernel_type, nmodels, nvars, degree):
train_values = [f(x) for f, x in zip(models, train_samples)]

gp = MultifidelityGaussianProcess(
-            kernel, alpha=1e-10, n_restarts_optimizer=0)
+            kernel, alpha=1e-10, n_restarts_optimizer=3)
gp.set_data(train_samples, train_values)
# print(gp.kernel.nsamples_per_model)

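Raising n_restarts_optimizer from 0 to 3 reruns the marginal-likelihood optimization from three extra random hyperparameter initializations and keeps the best fit, trading test time for robustness against local optima. A sketch with scikit-learn's plain GP, whose interface MultifidelityGaussianProcess appears to follow:

```python
# Sketch: restarts make kernel hyperparameter fitting less sensitive to
# the initial guess (scikit-learn API).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X = np.linspace(0, 1, 20)[:, None]
y = np.sin(2*np.pi*X).ravel()
gp = GaussianProcessRegressor(kernel=RBF(), alpha=1e-10,
                              n_restarts_optimizer=3)
gp.fit(X, y)
print(gp.kernel_)                 # optimized kernel after the best restart
```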
5 changes: 2 additions & 3 deletions pyapprox/surrogates/interp/barycentric_interpolation.py
@@ -5,8 +5,6 @@
from pyapprox.util.utilities import cartesian_product
from pyapprox.util.pya_numba import njit
from pyapprox.util.sys_utilities import trace_error_with_msg
-from pyapprox.util.visualization import (
-    get_meshgrid_function_data, create_3d_axis, mpl, plot_surface)


def compute_barycentric_weights_1d(samples, interval_length=None,
@@ -101,7 +99,7 @@ def barycentric_interpolation_1d(abscissa, weights, vals, eval_samples):
with warnings.catch_warnings():
# avoid division by zero warning
warnings.simplefilter("ignore")
-        return _barycentric_interpolation_1d(abscissa, weights, vals, eval_samples)
+        return _barycentric_interpolation_1d(
+            abscissa, weights, vals, eval_samples)


def barycentric_lagrange_interpolation_precompute(
1 change: 1 addition & 0 deletions pyapprox/util/linearalgebra/torchlinalg.py
@@ -97,6 +97,7 @@ def _la_isnan(self, mat) -> torch.Tensor:
return torch.isnan(mat)

def _la_atleast1d(self, val, dtype=torch.double) -> torch.Tensor:
+        print(val)
return torch.atleast_1d(
torch.as_tensor(val, dtype=dtype))

7 changes: 3 additions & 4 deletions pyproject.toml
@@ -4,7 +4,7 @@ requires = [
"setuptools",
"wheel",
"Cython",
"numpy>=1.16.4",
"numpy>=1.20, <=1.26.4",
"scipy>=1.0.0",
]

@@ -27,13 +27,12 @@ classifiers=[
"Operating System :: OS Independent",
]
dependencies = [
-    'setuptools',
-    'numpy >= 1.16.4',
+    'numpy >= 1.20, <=1.26.4',
'matplotlib',
'scipy >= 1.0.0',
'Cython',
'sympy',
-    'torch',
+    'torch<=2.2.0',
'scikit-learn',
'coverage>=6.4',
'pytest-cov',