Skip to content

Commit

Permalink
Formatting
Browse files Browse the repository at this point in the history
  • Loading branch information
franflame committed Mar 25, 2024
1 parent 3e4ff22 commit bc3a020
Show file tree
Hide file tree
Showing 4 changed files with 55 additions and 45 deletions.
6 changes: 3 additions & 3 deletions foqus_lib/framework/surrogate/keras_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,11 +57,11 @@
BaseScaler,
LinearScaler,
LogScaler,
LogScaler2,
PowerScaler,
LogScaler2,
PowerScaler,
PowerScaler2,
map_name_to_scaler,
scale_dataframe
scale_dataframe,
)

# mapping between the human-readable name for the scaling variant
Expand Down
6 changes: 3 additions & 3 deletions foqus_lib/framework/surrogate/pytorch_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,11 +54,11 @@
BaseScaler,
LinearScaler,
LogScaler,
LogScaler2,
PowerScaler,
LogScaler2,
PowerScaler,
PowerScaler2,
map_name_to_scaler,
scale_dataframe
scale_dataframe,
)

# custom class to define Keras NN layers
Expand Down
81 changes: 45 additions & 36 deletions foqus_lib/framework/surrogate/scaling.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,8 @@ def validate_for_scaling(array_in, lo, hi) -> None:
if lo == hi:
raise ValueError("Array must contain non-identical values")
if not check_under_or_overflow(array_in):
raise ValueError("Array contains under/overflow values for dtype")
raise ValueError("Array contains under/overflow values for dtype")


def check_under_or_overflow(arr):
if np.issubdtype(arr.dtype, np.integer):
Expand All @@ -28,7 +29,7 @@ def check_under_or_overflow(arr):
info = np.finfo(arr.dtype)
else:
raise ValueError("Unsupported data type")
max_value = info.max
max_value = info.max
min_value = info.min
return np.all(arr < max_value) & np.all(arr > min_value)

Expand All @@ -45,19 +46,20 @@ def scale_linear(array_in, lo=None, hi=None):
result = (array_in - lo) / (hi - lo)
return result


def scale_log(array_in, lo=None, hi=None):
    """Scale *array_in* to [0, 1] linearly in log10 space.

    Defect fixed: the diff artifact left both the old and the reformatted
    copy of the comment and of the ``result`` assignment; the value was
    computed twice. Only one copy is kept.

    Args:
        array_in: numpy array of strictly positive values to scale.
        lo: lower bound; defaults to ``array_in.min()`` when None.
        hi: upper bound; defaults to ``array_in.max()`` when None.

    Returns:
        Array scaled so that ``lo`` maps to 0 and ``hi`` maps to 1
        on a log10 axis.

    Raises:
        ValueError: if any value is <= 0 (log10 undefined), or if
            ``validate_for_scaling`` rejects the inputs.
    """
    # log10 is undefined for non-positive values, so reject them up front
    if np.any(array_in <= 0):
        raise ValueError("All values must be > 0 to use scale_log")
    if lo is None:
        lo = np.min(array_in)
    if hi is None:
        hi = np.max(array_in)
    validate_for_scaling(array_in, lo, hi)
    result = (np.log10(array_in) - np.log10(lo)) / (np.log10(hi) - np.log10(lo))
    return result


def scale_log2(array_in, lo=None, hi=None):
if lo is None:
lo = np.min(array_in)
Expand All @@ -66,71 +68,76 @@ def scale_log2(array_in, lo=None, hi=None):
validate_for_scaling(array_in, lo, hi)
result = np.log10(9 * (array_in - lo) / (hi - lo) + 1)
return result


# fix expected values in test


def scale_power(array_in, lo=None, hi=None):
    """Scale *array_in* to [0, 1] linearly in power-of-10 space.

    Defect fixed: the diff artifact left two consecutive assignments to
    ``result`` (the old one-line form and the reformatted multi-line
    form), so the first was dead code. Only one assignment is kept.

    Args:
        array_in: numpy array of values to scale.
        lo: lower bound; defaults to ``array_in.min()`` when None.
        hi: upper bound; defaults to ``array_in.max()`` when None.

    Returns:
        ``(10**x - 10**lo) / (10**hi - 10**lo)`` elementwise, so ``lo``
        maps to 0 and ``hi`` maps to 1.

    Raises:
        ValueError: if ``validate_for_scaling`` rejects the inputs.
    """
    if lo is None:
        lo = np.min(array_in)
    if hi is None:
        hi = np.max(array_in)
    validate_for_scaling(array_in, lo, hi)
    result = (np.power(10, array_in) - np.power(10, lo)) / (
        np.power(10, hi) - np.power(10, lo)
    )
    return result


def scale_power2(array_in, lo=None, hi=None):
    """Scale *array_in* with the alternate power-of-10 variant.

    Defect fixed: the diff artifact left two consecutive assignments to
    ``result`` (old parenthesized form and reformatted one-liner); the
    first was dead code. Only one assignment is kept.

    Args:
        array_in: numpy array of values to scale.
        lo: lower bound; defaults to ``array_in.min()`` when None.
        hi: upper bound; defaults to ``array_in.max()`` when None.

    Returns:
        ``(10**((x - lo)/(hi - lo)) - 1) / 9`` elementwise, so ``lo``
        maps to 0 and ``hi`` maps to 1.

    Raises:
        ValueError: if ``validate_for_scaling`` rejects the inputs.
    """
    if lo is None:
        lo = np.min(array_in)
    if hi is None:
        hi = np.max(array_in)
    validate_for_scaling(array_in, lo, hi)
    result = 1 / 9 * (np.power(10, (array_in - lo) / (hi - lo)) - 1)
    return result


def unscale_linear(array_in, lo, hi):
    """Invert linear [0, 1] scaling back onto the original [lo, hi] range.

    Args:
        array_in: scaled values (typically in [0, 1]).
        lo: original lower bound.
        hi: original upper bound.

    Returns:
        ``array_in * (hi - lo) + lo`` elementwise.
    """
    span = hi - lo
    # the / 1.0 mirrors the companion unscale_* helpers' scaling factor
    return (array_in * span) / 1.0 + lo


def unscale_log(array_in, lo, hi):
    """Invert scale_log: map [0, 1] values back onto the original range.

    Defect fixed: the block carried two duplicated stretches of
    commented-out dead code (an artifact of the formatting diff); both
    are removed per best practice of deleting commented-out code.

    Args:
        array_in: scaled values (typically in [0, 1]).
        lo: original lower bound (must be > 0, as in scale_log).
        hi: original upper bound.

    Returns:
        ``lo * (hi / lo) ** x`` elementwise — equivalent to
        ``10 ** (x * (log10(hi) - log10(lo)) + log10(lo))``, the inverse
        of scale_log.
    """
    result = lo * np.power(hi / lo, array_in)
    return result


def unscale_log2(array_in, lo=None, hi=None):
    """Invert scale_log2: map [0, 1] values back onto the original range.

    Defect fixed: the diff artifact left two consecutive assignments to
    ``result`` (old wrapped form and reformatted one-liner); the first
    was dead code. Only one assignment is kept.

    Args:
        array_in: scaled values (typically in [0, 1]).
        lo: original lower bound. NOTE(review): the default of None is
            unusable here — ``hi - lo`` would raise TypeError; callers
            appear to always pass both bounds. Kept for interface
            compatibility.
        hi: original upper bound (same caveat as ``lo``).

    Returns:
        ``(10**x - 1) * (hi - lo) / 9 + lo`` elementwise, the inverse of
        ``log10(9 * (v - lo) / (hi - lo) + 1)``.
    """
    result = (np.power(10, array_in / 1.0) - 1) * (hi - lo) / 9.0 + lo
    return result


def unscale_power(array_in, lo, hi):
    """Invert scale_power: map [0, 1] values back onto the original range.

    Defect fixed: the diff artifact merged the old and reformatted
    bodies, leaving a duplicated expression line and a stray extra
    closing parenthesis that made the span syntactically invalid; a
    single clean statement is kept, along with one copy of the
    duplicated leading comment.

    Args:
        array_in: scaled values (typically in [0, 1]).
        lo: original lower bound.
        hi: original upper bound.

    Returns:
        ``log10(x * (10**hi - 10**lo) + 10**lo)`` elementwise, the
        inverse of scale_power.
    """
    # check if lo and hi were provided
    result = np.log10(
        (array_in / 1.0) * (np.power(10, hi) - np.power(10, lo)) + np.power(10, lo)
    )
    return result


def unscale_power2(array_in, lo, hi):
    """Invert scale_power2: map [0, 1] values back onto the original range.

    Defect fixed: the diff artifact left two consecutive assignments to
    ``result`` (old wrapped form and reformatted one-liner); the first
    was dead code. Only one assignment is kept.

    Args:
        array_in: scaled values (typically in [0, 1]).
        lo: original lower bound.
        hi: original upper bound.

    Returns:
        ``log10(9 * x + 1) * (hi - lo) + lo`` elementwise, the inverse
        of ``(10**((v - lo)/(hi - lo)) - 1) / 9``.
    """
    result = np.log10(9.0 * array_in / 1.0 + 1) * (hi - lo) + lo
    return result


class BaseScaler:
# def __init__(self, data_array: np.ndarray):
# self.data = data_array
Expand All @@ -143,11 +150,7 @@ def fit(self, X: np.ndarray):
return self

def fit_transform(self, X: np.ndarray) -> np.ndarray:
return (
self
.fit(X)
.transform(X)
)
return self.fit(X).transform(X)

def transform(self, X: np.ndarray) -> np.ndarray:
raise NotImplementedError
Expand All @@ -162,44 +165,50 @@ def transform(self, X: np.ndarray) -> np.ndarray:

def inverse_transform(self, X: np.ndarray) -> np.ndarray:
return unscale_linear(X, self.lo_, self.hi_)



class LogScaler(BaseScaler):
    """Scaler pairing scale_log/unscale_log over the stored lo_/hi_ bounds."""

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Log-scale X using self.lo_ and self.hi_ (presumably set by fit — confirm in BaseScaler)."""
        scaled = scale_log(X, self.lo_, self.hi_)
        return scaled

    def inverse_transform(self, X: np.ndarray) -> np.ndarray:
        """Map log-scaled values back onto the original range."""
        restored = unscale_log(X, self.lo_, self.hi_)
        return restored


class LogScaler2(BaseScaler):
    """Scaler pairing scale_log2/unscale_log2 over the stored lo_/hi_ bounds."""

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Apply the log2-variant scaling using self.lo_ and self.hi_."""
        scaled = scale_log2(X, self.lo_, self.hi_)
        return scaled

    def inverse_transform(self, X: np.ndarray) -> np.ndarray:
        """Map log2-variant scaled values back onto the original range."""
        restored = unscale_log2(X, self.lo_, self.hi_)
        return restored


class PowerScaler(BaseScaler):
    """Scaler pairing scale_power/unscale_power over the stored lo_/hi_ bounds."""

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Apply power-of-10 scaling using self.lo_ and self.hi_."""
        scaled = scale_power(X, self.lo_, self.hi_)
        return scaled

    def inverse_transform(self, X: np.ndarray) -> np.ndarray:
        """Map power-scaled values back onto the original range."""
        restored = unscale_power(X, self.lo_, self.hi_)
        return restored


class PowerScaler2(BaseScaler):
    """Scaler pairing scale_power2/unscale_power2 over the stored lo_/hi_ bounds."""

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Apply the power2-variant scaling using self.lo_ and self.hi_."""
        scaled = scale_power2(X, self.lo_, self.hi_)
        return scaled

    def inverse_transform(self, X: np.ndarray) -> np.ndarray:
        """Map power2-variant scaled values back onto the original range."""
        restored = unscale_power2(X, self.lo_, self.hi_)
        return restored


# Mapping between the human-readable name for the scaling variant and the
# scaler instance implementing it. Defect fixed: the diff artifact left the
# trailing placeholder comment duplicated ("#..." and "# ..."); one copy kept.
map_name_to_scaler = {
    "Linear": LinearScaler(),
    "Log": LogScaler(),
    "Log2": LogScaler2(),
    "Power": PowerScaler(),
    "Power2": PowerScaler2(),
    # ...
}


def scale_dataframe(df: pd.DataFrame, scaler: BaseScaler) -> Tuple[pd.DataFrame, dict]:
scaled_df = pd.DataFrame(np.nan, columns=df.columns, index=df.index)
bounds = {}
Expand Down
7 changes: 4 additions & 3 deletions foqus_lib/framework/surrogate/scikit_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,13 +56,14 @@
BaseScaler,
LinearScaler,
LogScaler,
LogScaler2,
PowerScaler,
LogScaler2,
PowerScaler,
PowerScaler2,
map_name_to_scaler,
scale_dataframe
scale_dataframe,
)


def validate_training_data(xdata: np.ndarray, zdata: np.ndarray):
number_columns_in_xdata = xdata.shape[1]
number_columns_in_zdata = zdata.shape[1]
Expand Down

0 comments on commit bc3a020

Please sign in to comment.