diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 64df5fac28..8bd70f352f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -23,13 +23,14 @@ conftest.py @lbianchi-lbl /idaes/commands/ @dangunter # Core -/idaes/core/ @andrewlee94 @dallan-keylogic @lbianchi-lbl +/idaes/core/ @agarciadiego @dallan-keylogic @lbianchi-lbl /idaes/core/dmf/ @dangunter -/idaes/core/surrogate/ @andrewlee94 @bpaul4 @avdudchenko @rundxdi +/idaes/core/surrogate/ @bpaul4 @avdudchenko @rundxdi /idaes/core/ui/ @dangunter # Models -/idaes/models/ @andrewlee94 @bpaul4 +/idaes/models/ @agarciadiego @bpaul4 +/idaes/models/properties/ @agarciadiego @dallan-keylogic # Apps - each package needs a maintainer /idaes/apps/caprese/ @Robbybp diff --git a/idaes/core/scaling/custom_scaler_base.py b/idaes/core/scaling/custom_scaler_base.py index 8e94cc7c5a..a6b311086d 100644 --- a/idaes/core/scaling/custom_scaler_base.py +++ b/idaes/core/scaling/custom_scaler_base.py @@ -16,7 +16,6 @@ Author: Andrew Lee """ from copy import copy -from enum import Enum from pyomo.environ import ComponentMap, units, value from pyomo.core.base.units_container import UnitsError @@ -26,6 +25,7 @@ from idaes.core.scaling.scaling_base import CONFIG, ScalerBase from idaes.core.scaling.util import get_scaling_factor, NominalValueExtractionVisitor import idaes.logger as idaeslog +from idaes.core.util.misc import StrEnum # Set up logger _log = idaeslog.getLogger(__name__) @@ -41,7 +41,7 @@ } -class ConstraintScalingScheme(str, Enum): +class ConstraintScalingScheme(StrEnum): """ Schemes available for calculating constraint scaling factors. diff --git a/idaes/core/surrogate/keras_surrogate.py b/idaes/core/surrogate/keras_surrogate.py index 45a3a64a2a..1bc1feed62 100644 --- a/idaes/core/surrogate/keras_surrogate.py +++ b/idaes/core/surrogate/keras_surrogate.py @@ -17,23 +17,21 @@ # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring -from enum import Enum import json import os.path -import numpy as np import pandas as pd from pyomo.common.dependencies import attempt_import -from idaes.core.surrogate.base.surrogate_base import SurrogateBase from idaes.core.surrogate.sampling.scaling import OffsetScaler +from idaes.core.surrogate.omlt_base_surrogate_class import OMLTSurrogate + keras, keras_available = attempt_import("tensorflow.keras") omlt, omlt_available = attempt_import("omlt") if omlt_available: - from omlt import OmltBlock, OffsetScaling from omlt.neuralnet.nn_formulation import ( FullSpaceSmoothNNFormulation, ReducedSpaceSmoothNNFormulation, @@ -45,7 +43,7 @@ from omlt.io import load_keras_sequential -class KerasSurrogate(SurrogateBase): +class KerasSurrogate(OMLTSurrogate): def __init__( self, keras_model, @@ -87,52 +85,11 @@ def __init__( input_labels=input_labels, output_labels=output_labels, input_bounds=input_bounds, + input_scaler=input_scaler, + output_scaler=output_scaler, ) - - # make sure we are using the standard scaler - if ( - input_scaler is not None - and not isinstance(input_scaler, OffsetScaler) - or output_scaler is not None - and not isinstance(output_scaler, OffsetScaler) - ): - raise NotImplementedError("KerasSurrogate only supports the OffsetScaler.") - - # check that the input labels match - if input_scaler is not None and input_scaler.expected_columns() != input_labels: - raise ValueError( - "KerasSurrogate created with input_labels that do not match" - " the expected columns in the input_scaler.\n" - "input_labels={}\n" - "input_scaler.expected_columns()={}".format( - input_labels, 
input_scaler.expected_columns() - ) - ) - - # check that the output labels match - if ( - output_scaler is not None - and output_scaler.expected_columns() != output_labels - ): - raise ValueError( - "KerasSurrogate created with output_labels that do not match" - " the expected columns in the output_scaler.\n" - "output_labels={}\n" - "output_scaler.expected_columns()={}".format( - output_labels, output_scaler.expected_columns() - ) - ) - - self._input_scaler = input_scaler - self._output_scaler = output_scaler self._keras_model = keras_model - class Formulation(Enum): - FULL_SPACE = 1 - REDUCED_SPACE = 2 - RELU_BIGM = 3 - RELU_COMPLEMENTARITY = 4 - def populate_block(self, block, additional_options=None): """ Method to populate a Pyomo Block with the keras model constraints. @@ -149,37 +106,7 @@ def populate_block(self, block, additional_options=None): formulation = additional_options.pop( "formulation", KerasSurrogate.Formulation.FULL_SPACE ) - offset_inputs = np.zeros(self.n_inputs()) - factor_inputs = np.ones(self.n_inputs()) - offset_outputs = np.zeros(self.n_outputs()) - factor_outputs = np.ones(self.n_outputs()) - if self._input_scaler: - offset_inputs = self._input_scaler.offset_series()[ - self.input_labels() - ].to_numpy() - factor_inputs = self._input_scaler.factor_series()[ - self.input_labels() - ].to_numpy() - if self._output_scaler: - offset_outputs = self._output_scaler.offset_series()[ - self.output_labels() - ].to_numpy() - factor_outputs = self._output_scaler.factor_series()[ - self.output_labels() - ].to_numpy() - - # build the OMLT scaler object - omlt_scaling = OffsetScaling( - offset_inputs=offset_inputs, - factor_inputs=factor_inputs, - offset_outputs=offset_outputs, - factor_outputs=factor_outputs, - ) - - # omlt takes *scaled* input bounds as a dictionary with int keys - input_bounds = dict(enumerate(self.input_bounds().values())) - scaled_input_bounds = omlt_scaling.get_scaled_input_expressions(input_bounds) - scaled_input_bounds = {i: tuple(bnd) for i, bnd in scaled_input_bounds.items()} + omlt_scaling, scaled_input_bounds = self.generate_omlt_scaling_objecets() net = load_keras_sequential( self._keras_model, @@ -201,32 +128,7 @@ def populate_block(self, block, additional_options=None): "KerasSurrogate.populate_block. Please pass a valid " "formulation.".format(formulation) ) - block.nn = OmltBlock() - block.nn.build_formulation( - formulation_object, - ) - - # input/output variables need to be constrained to be equal - # auto-created variables that come from OMLT. 
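For context: this refactor moves the scaler validation and OMLT plumbing shared by the Keras and ONNX surrogates into a common base class without changing the public KerasSurrogate API. A minimal usage sketch under that assumption (the model, labels, and bounds below are illustrative, not taken from this PR; the OffsetScaler dict structure mirrors the *_idaes_info.json files used elsewhere in this diff):

from pyomo.environ import ConcreteModel, Var
from idaes.core.surrogate.keras_surrogate import KerasSurrogate
from idaes.core.surrogate.sampling.scaling import OffsetScaler
from idaes.core.surrogate.surrogate_block import SurrogateBlock

# nn_model is assumed to be a trained tensorflow.keras model with one
# input ("T") and one output ("conv"); OffsetScaler is the only scaler
# type these OMLT-based surrogates accept.
input_scaler = OffsetScaler.from_dict(
    {"expected_columns": ["T"], "offset": {"T": 300.0}, "factor": {"T": 200.0}}
)

surrogate = KerasSurrogate(
    nn_model,
    input_labels=["T"],
    output_labels=["conv"],
    input_bounds={"T": (300.0, 500.0)},
    input_scaler=input_scaler,
)

m = ConcreteModel()
m.T = Var(initialize=400.0)
m.conv = Var(initialize=0.5)
m.surrogate = SurrogateBlock()
m.surrogate.build_model(
    surrogate_object=surrogate,
    input_vars=[m.T],
    output_vars=[m.conv],
    formulation=KerasSurrogate.Formulation.FULL_SPACE,
)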
-        input_idx_by_label = {s: i for i, s in enumerate(self._input_labels)}
-        input_vars_as_dict = block.input_vars_as_dict()
-
-        @block.Constraint(self._input_labels)
-        def input_surrogate_ties(m, input_label):
-            return (
-                input_vars_as_dict[input_label]
-                == block.nn.inputs[input_idx_by_label[input_label]]
-            )
-
-        output_idx_by_label = {s: i for i, s in enumerate(self._output_labels)}
-        output_vars_as_dict = block.output_vars_as_dict()
-
-        @block.Constraint(self._output_labels)
-        def output_surrogate_ties(m, output_label):
-            return (
-                output_vars_as_dict[output_label]
-                == block.nn.outputs[output_idx_by_label[output_label]]
-            )
+        self.populate_block_with_net(block, formulation_object)
 
     def evaluate_surrogate(self, inputs):
         """
diff --git a/idaes/core/surrogate/omlt_base_surrogate_class.py b/idaes/core/surrogate/omlt_base_surrogate_class.py
new file mode 100644
index 0000000000..6ae587d583
--- /dev/null
+++ b/idaes/core/surrogate/omlt_base_surrogate_class.py
@@ -0,0 +1,187 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2023 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+"""
+Base class for importing OMLT-supported neural network surrogates into IDAES
+"""
+# TODO: Missing docstrings
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+
+from enum import Enum
+import numpy as np
+
+from pyomo.common.dependencies import attempt_import
+
+from idaes.core.surrogate.base.surrogate_base import SurrogateBase
+from idaes.core.surrogate.sampling.scaling import OffsetScaler
+
+keras, keras_available = attempt_import("tensorflow.keras")
+omlt, omlt_available = attempt_import("omlt")
+
+if omlt_available:
+    from omlt import OmltBlock, OffsetScaling
+
+
+class OMLTSurrogate(SurrogateBase):
+    def __init__(
+        self,
+        input_labels,
+        output_labels,
+        input_bounds,
+        input_scaler=None,
+        output_scaler=None,
+    ):
+        """
+        Base SurrogateObject for surrogates imported through OMLT
+        (e.g. Keras or ONNX models). Utilizes the OMLT framework for
+        translating trained models into Pyomo constraints.
+
+        Contains methods to both populate a Pyomo Block with constraints
+        representing the surrogate and to evaluate the surrogate at a set of
+        user-provided points.
+
+        This constructor should only be used when first creating the surrogate within IDAES.
+        Once created, this object can be stored to disk using save_to_folder and loaded
+        with load_from_folder.
+
+        Args:
+            input_labels: list of str
+                The ordered list of labels corresponding to the inputs in the model
+            output_labels: list of str
+                The ordered list of labels corresponding to the outputs in the model
+            input_bounds: None or dict of tuples
+                Keys correspond to each of the input labels and values are the tuples of
+                bounds (lb, ub)
+            input_scaler: None or OffsetScaler
+                The scaler to be used for the inputs. If None, then no scaler is used
+            output_scaler: None or OffsetScaler
+                The scaler to be used for the outputs. If None, then no scaler is used
+        """
+        super().__init__(
+            input_labels=input_labels,
+            output_labels=output_labels,
+            input_bounds=input_bounds,
+        )
+
+        # make sure we are using the standard scaler
+        if (
+            input_scaler is not None
+            and not isinstance(input_scaler, OffsetScaler)
+            or output_scaler is not None
+            and not isinstance(output_scaler, OffsetScaler)
+        ):
+            raise NotImplementedError("OMLTSurrogate only supports the OffsetScaler.")
+
+        # check that the input labels match
+        if input_scaler is not None and input_scaler.expected_columns() != input_labels:
+            raise ValueError(
+                "OMLTSurrogate created with input_labels that do not match"
+                " the expected columns in the input_scaler.\n"
+                "input_labels={}\n"
+                "input_scaler.expected_columns()={}".format(
+                    input_labels, input_scaler.expected_columns()
+                )
+            )
+
+        # check that the output labels match
+        if (
+            output_scaler is not None
+            and output_scaler.expected_columns() != output_labels
+        ):
+            raise ValueError(
+                "OMLTSurrogate created with output_labels that do not match"
+                " the expected columns in the output_scaler.\n"
+                "output_labels={}\n"
+                "output_scaler.expected_columns()={}".format(
+                    output_labels, output_scaler.expected_columns()
+                )
+            )
+
+        self._input_scaler = input_scaler
+        self._output_scaler = output_scaler
+
+    class Formulation(Enum):
+        FULL_SPACE = 1
+        REDUCED_SPACE = 2
+        RELU_BIGM = 3
+        RELU_COMPLEMENTARITY = 4
+
+    def generate_omlt_scaling_objecets(self):
+        offset_inputs = np.zeros(self.n_inputs())
+        factor_inputs = np.ones(self.n_inputs())
+        offset_outputs = np.zeros(self.n_outputs())
+        factor_outputs = np.ones(self.n_outputs())
+        if self._input_scaler:
+            offset_inputs = self._input_scaler.offset_series()[
+                self.input_labels()
+            ].to_numpy()
+            factor_inputs = self._input_scaler.factor_series()[
+                self.input_labels()
+            ].to_numpy()
+        if self._output_scaler:
+            offset_outputs = self._output_scaler.offset_series()[
+                self.output_labels()
+            ].to_numpy()
+            factor_outputs = self._output_scaler.factor_series()[
+                self.output_labels()
+            ].to_numpy()
+
+        omlt_scaling = OffsetScaling(
+            offset_inputs=offset_inputs,
+            factor_inputs=factor_inputs,
+            offset_outputs=offset_outputs,
+            factor_outputs=factor_outputs,
+        )
+
+        # omlt takes *scaled* input bounds as a dictionary with int keys
+        input_bounds = dict(enumerate(self.input_bounds().values()))
+        scaled_input_bounds = omlt_scaling.get_scaled_input_expressions(input_bounds)
+        scaled_input_bounds = {i: tuple(bnd) for i, bnd in scaled_input_bounds.items()}
+        return omlt_scaling, scaled_input_bounds
+
+    def populate_block_with_net(self, block, formulation_object):
+        """
+        Method to populate a Pyomo Block with the omlt model constraints and build its formulation.
+
+        Args:
+            block: Pyomo Block component
+                The block to be populated with variables and/or constraints.
+            formulation_object: omlt loaded network formulation
+        """
+
+        block.nn = OmltBlock()
+        block.nn.build_formulation(
+            formulation_object,
+        )
+
+        # input/output variables need to be constrained to be equal to the
+        # auto-created variables that come from OMLT.
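+        # Example (illustrative): for input_labels = ["T", "P"], the tie
+        # constraints built below are
+        #     input_vars_as_dict["T"] == block.nn.inputs[0]
+        #     input_vars_as_dict["P"] == block.nn.inputs[1]
+        # so the IDAES-facing variables track OMLT's internal network variables.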
+        input_idx_by_label = {s: i for i, s in enumerate(self._input_labels)}
+        input_vars_as_dict = block.input_vars_as_dict()
+
+        @block.Constraint(self._input_labels)
+        def input_surrogate_ties(m, input_label):
+            return (
+                input_vars_as_dict[input_label]
+                == block.nn.inputs[input_idx_by_label[input_label]]
+            )
+
+        output_idx_by_label = {s: i for i, s in enumerate(self._output_labels)}
+        output_vars_as_dict = block.output_vars_as_dict()
+
+        @block.Constraint(self._output_labels)
+        def output_surrogate_ties(m, output_label):
+            return (
+                output_vars_as_dict[output_label]
+                == block.nn.outputs[output_idx_by_label[output_label]]
+            )
diff --git a/idaes/core/surrogate/onnx_surrogate.py b/idaes/core/surrogate/onnx_surrogate.py
new file mode 100644
index 0000000000..52b559a89b
--- /dev/null
+++ b/idaes/core/surrogate/onnx_surrogate.py
@@ -0,0 +1,257 @@
+#################################################################################
+# The Institute for the Design of Advanced Energy Systems Integrated Platform
+# Framework (IDAES IP) was produced under the DOE Institute for the
+# Design of Advanced Energy Systems (IDAES).
+#
+# Copyright (c) 2018-2023 by the software owners: The Regents of the
+# University of California, through Lawrence Berkeley National Laboratory,
+# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
+# University, West Virginia University Research Corporation, et al.
+# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
+# for full copyright and license information.
+#################################################################################
+"""
+Interface for importing ONNX models into IDAES
+"""
+# TODO: Missing docstrings
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+# TODO: Importing protected _ACTIVATION_OP_TYPES as not exposed in distributed version
+# pylint: disable=W0123
+from enum import Enum
+import json
+import os.path
+
+from pyomo.common.dependencies import attempt_import
+
+from idaes.core.surrogate.sampling.scaling import OffsetScaler
+
+from idaes.core.surrogate.omlt_base_surrogate_class import OMLTSurrogate
+
+onnx, onnx_available = attempt_import("onnx")
+omlt, omlt_available = attempt_import("omlt")
+
+if omlt_available:
+    from omlt.neuralnet import (
+        FullSpaceSmoothNNFormulation,
+        ReducedSpaceSmoothNNFormulation,
+        ReluBigMFormulation,
+        ReluComplementarityFormulation,
+    )
+    import omlt.io as omltio
+
+    if onnx_available:
+        from omlt.io import load_onnx_neural_network, write_onnx_model_with_bounds
+
+
+class ONNXSurrogate(OMLTSurrogate):
+    def __init__(
+        self,
+        onnx_model,
+        input_labels,
+        output_labels,
+        input_bounds,
+        input_scaler=None,
+        output_scaler=None,
+    ):
+        """
+        Standard SurrogateObject for surrogates based on ONNX models.
+        Utilizes the OMLT framework for importing ONNX models to IDAES.
+
+        Contains methods to both populate a Pyomo Block with constraints
+        representing the surrogate and to evaluate the surrogate at a set of
+        user-provided points.
+
+        This constructor should only be used when first creating the surrogate within IDAES.
+        Once created, this object can be stored to disk using save_to_folder and loaded
+        with load_from_folder.
+
+        Args:
+            onnx_model: Onnx model file to be loaded.
+            input_labels: list of str
+                The ordered list of labels corresponding to the inputs in the onnx model
+            output_labels: list of str
+                The ordered list of labels corresponding to the outputs in the onnx model
+            input_bounds: None or dict of tuples
+                Keys correspond to each of the input labels and values are the tuples of
+                bounds (lb, ub)
+            input_scaler: None or OffsetScaler
+                The scaler to be used for the inputs. If None, then no scaler is used
+            output_scaler: None or OffsetScaler
+                The scaler to be used for the outputs. If None, then no scaler is used
+        """
+        super().__init__(
+            input_labels=input_labels,
+            output_labels=output_labels,
+            input_bounds=input_bounds,
+            input_scaler=input_scaler,
+            output_scaler=output_scaler,
+        )
+
+        self._onnx_model = onnx_model
+
+    class Formulation(Enum):
+        FULL_SPACE = 1
+        REDUCED_SPACE = 2
+        RELU_BIGM = 3
+        RELU_COMPLEMENTARITY = 4
+
+    def populate_block(self, block, additional_options=None):
+        """
+        Method to populate a Pyomo Block with the onnx model constraints.
+
+        Args:
+            block: Pyomo Block component
+                The block to be populated with variables and/or constraints.
+            additional_options: dict or None
+                If not None, then should be a dict with the following keys;
+                'formulation': ONNXSurrogate.Formulation
+                    The formulation to use with OMLT. Possible values are FULL_SPACE,
+                    REDUCED_SPACE, RELU_BIGM, or RELU_COMPLEMENTARITY (default is
+                    REDUCED_SPACE)
+        """
+        formulation = additional_options.pop(
+            "formulation", ONNXSurrogate.Formulation.REDUCED_SPACE
+        )
+        # generate_omlt_scaling_objecets already returns the *scaled* input
+        # bounds, so no further rescaling is needed here
+        omlt_scaling, scaled_input_bounds = self.generate_omlt_scaling_objecets()
+
+        # TODO: remove this once new OMLT 1.2 is made available and includes tanh support
+        # overrides default available activation functions for ONNX, tanh is not listed in 1.1 but is supported
+
+        omltio.onnx_parser._ACTIVATION_OP_TYPES = [  # pylint: disable=protected-access
+            "Relu",
+            "Sigmoid",
+            "LogSoftmax",
+            "Tanh",
+        ]
+
+        net = load_onnx_neural_network(
+            self._onnx_model,
+            scaling_object=omlt_scaling,
+            input_bounds=scaled_input_bounds,
+        )
+
+        if formulation == ONNXSurrogate.Formulation.FULL_SPACE:
+            formulation_object = FullSpaceSmoothNNFormulation(net)
+        elif formulation == ONNXSurrogate.Formulation.REDUCED_SPACE:
+            formulation_object = ReducedSpaceSmoothNNFormulation(net)
+        elif formulation == ONNXSurrogate.Formulation.RELU_BIGM:
+            formulation_object = ReluBigMFormulation(net)
+        elif formulation == ONNXSurrogate.Formulation.RELU_COMPLEMENTARITY:
+            formulation_object = ReluComplementarityFormulation(net)
+        else:
+            raise ValueError(
+                'An unrecognized formulation "{}" was passed to '
+                "ONNXSurrogate.populate_block. Please pass a valid "
+                "formulation.".format(formulation)
+            )
+        self.populate_block_with_net(block, formulation_object)
+
+    def evaluate_surrogate(self, inputs):
+        """
+        Method to evaluate ONNX model at a set of input values.
+
+        Args:
+            inputs: numpy array of input values. First dimension of array
+                must match the number of input variables.
+
+        Returns:
+            outputs: numpy array of values for all outputs evaluated at input
+                points.
+ """ + raise NotImplementedError + + def save_to_folder(self, save_location, save_name): + """ + Save the surrogate object to disk by providing the location to store the + model in and its name, as well as additional IDAES metadata + + Args: + save_location: str + The name of the folder to contain the ONNX model and additional + IDAES metadata + save_name: str + The name for the model + """ + + write_onnx_model_with_bounds( + os.path.join(save_location, "{}.onnx".format(save_name)), + onnx_model=self._onnx_model, + input_bounds=None, + ) + info = dict() + info["input_scaler"] = None + if self._input_scaler is not None: + info["input_scaler"] = self._input_scaler.to_dict() + info["output_scaler"] = None + if self._output_scaler is not None: + info["output_scaler"] = self._output_scaler.to_dict() + + # serialize information from the base class + info["input_labels"] = self.input_labels() + info["output_labels"] = self.output_labels() + info["input_bounds"] = self.input_bounds() + + with open( + os.path.join(save_location, "{}_idaes_info.json".format(save_name)), "w" + ) as fd: + json.dump(info, fd) + + @classmethod + def load_onnx_model(cls, onnx_model_location, model_name): + """ + Load the surrogate object from disk by providing the name of the + folder holding the onnx model and its name, including accompanying json file that includes following + structure: + + "input_scaler":{ + "expected_columns":[list of input_keys], + "offset":{"input_key":offset_value,etc.}, + "factor":{"input_key":factor_value (e.g. multiplier),etc.}} + + "output_scaler":{ + "expected_columns":[list of output_keys], + "offset":{"output_key":offset_value,etc.}, + "factor":{"output_key":factor_value (e.g. multiplier),etc.}} + + "input_bounds":{"input_key":[low_bound,high_bound],etc.} + "input_labels":[list of input_keys] + "output_labels":[list of output_keys] + + Args: + folder_name: str + The name of the folder containing the onnx model and additional + IDAES metadata + model_name: str + The name of the model to load in the folder + + Returns: an instance of ONNXSurrogate + """ + onnx_model = onnx.load( + os.path.join(onnx_model_location, "{}.onnx".format(model_name)) + ) + with open( + os.path.join(onnx_model_location, "{}_idaes_info.json".format(model_name)) + ) as fd: + info = json.load(fd) + + input_scaler = None + if info["input_scaler"] is not None: + input_scaler = OffsetScaler.from_dict(info["input_scaler"]) + + output_scaler = None + if info["output_scaler"] is not None: + output_scaler = OffsetScaler.from_dict(info["output_scaler"]) + + return ONNXSurrogate( + onnx_model=onnx_model, + input_labels=info["input_labels"], + output_labels=info["output_labels"], + input_bounds=info["input_bounds"], + input_scaler=input_scaler, + output_scaler=output_scaler, + ) diff --git a/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx new file mode 100644 index 0000000000..b0101b3ec0 Binary files /dev/null and b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST.onnx differ diff --git a/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json new 
file mode 100644 index 0000000000..d2970958d1 --- /dev/null +++ b/idaes/core/surrogate/tests/data/onnx_models/net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST_idaes_info.json @@ -0,0 +1 @@ +{"input_scaler": {"expected_columns": ["feed_pH", "pressure_bar_feed", "Na", "Cl", "Ca", "Mg", "HCO3", "SO4", "K", "Sr", "Ba", "HCl"], "offset": {"feed_pH": 4.0, "pressure_bar_feed": 1.0, "Na": 0.0, "Cl": 0.0, "Ca": 0.0, "Mg": 0.0, "HCO3": 0.0, "SO4": 0.0, "K": 0.0, "Sr": 0.0, "Ba": 0.0, "HCl": 0.0}, "factor": {"feed_pH": 8.0, "pressure_bar_feed": 405.32, "Na": 135.99, "Cl": 180.0, "Ca": 10.0, "Mg": 10.0, "HCO3": 10.0, "SO4": 100.0, "K": 40.0, "Sr": 10.0, "Ba": 0.1, "HCl": 2000.0}}, "input_bounds": {"feed_pH": [4.0, 12.0], "pressure_bar_feed": [1.0, 406.32], "Na": [0.0, 135.99], "Cl": [0.0, 180.0], "Ca": [0.0, 10.0], "Mg": [0.0, 10.0], "HCO3": [0.0, 10.0], "SO4": [0.0, 100.0], "K": [0.0, 40.0], "Sr": [0.0, 10.0], "Ba": [0.0, 0.1], "HCl": [0.0, 2000.0]}, "output_scaler": {"expected_columns": ["Calcite_ST"], "offset": {"Calcite_ST": 0.0}, "factor": {"Calcite_ST": 98.19}}, "input_labels": ["feed_pH", "pressure_bar_feed", "Na", "Cl", "Ca", "Mg", "HCO3", "SO4", "K", "Sr", "Ba", "HCl"], "output_labels": ["Calcite_ST"]} \ No newline at end of file diff --git a/idaes/core/surrogate/tests/test_onnx_surrogate.py b/idaes/core/surrogate/tests/test_onnx_surrogate.py new file mode 100644 index 0000000000..623254c285 --- /dev/null +++ b/idaes/core/surrogate/tests/test_onnx_surrogate.py @@ -0,0 +1,173 @@ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES). +# +# Copyright (c) 2018-2023 by the software owners: The Regents of the +# University of California, through Lawrence Berkeley National Laboratory, +# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon +# University, West Virginia University Research Corporation, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md +# for full copyright and license information. 
+################################################################################# +""" +Tests for ONNXSurrogate +""" +import pytest + +pytest.importorskip("onnx", reason="onnx not available") +pytest.importorskip("omlt", reason="omlt not available") + +import os.path +from pyomo.common.fileutils import this_file_dir +from pyomo.common.tempfiles import TempfileManager +from pyomo.environ import ( + ConcreteModel, + Var, + SolverFactory, + assert_optimal_termination, + value, +) +from idaes.core.surrogate.onnx_surrogate import ONNXSurrogate +from idaes.core.surrogate.surrogate_block import SurrogateBlock +from idaes.core.surrogate.sampling.scaling import OffsetScaler +import json +import onnx + +# onnx, onnx_available = attempt_import("onnx") +rtol = 1e-4 +atol = 1e-4 + + +def load_onnx_model_data( + name="net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST", +): + onnx_folder_name = os.path.join(this_file_dir(), "data", "onnx_models") + onnx_model = onnx.load(os.path.join(onnx_folder_name, "{}.onnx".format(name))) + with open(os.path.join(onnx_folder_name, "{}_idaes_info.json".format(name))) as fd: + scaler_info = json.load(fd) + + test_inputs = { + "vars": [ + "feed_pH", + "pressure_bar_feed", + "Na", + "Cl", + "Ca", + "Mg", + "HCO3", + "SO4", + "K", + "Sr", + "Ba", + "HCl", + ], + "feed_pH": 9.5, + "pressure_bar_feed": 1.01325, + "Na": 0.230858556000748, + "Cl": 0.106701648328453, + "Ca": 0.0245274696499109, + "Mg": 0.0311348703689873, + "HCO3": 0.430482141673564, + "SO4": 0.182204065, + "K": 0.000500561, + "Sr": 0.000761853, + "Ba": 2.50e-05, + "HCl": 10, + } + test_outputs = {"vars": ["Calcite_ST"], "Calcite_ST": 40.4270772546529} + return onnx_model, scaler_info, test_inputs, test_outputs + + +@pytest.mark.unit +@pytest.mark.skipif(not SolverFactory("ipopt").available(False), reason="no Ipopt") +def test_onnx_surrogate_manual_creation(): + ### + # Test 1->2 sigmoid + ### + onnx_model, scaler_info, test_inputs, test_outputs = load_onnx_model_data() + input_scaler = None + for key, items in scaler_info.items(): + print(key, items) + if scaler_info["input_scaler"] is not None: + input_scaler = OffsetScaler.from_dict(scaler_info["input_scaler"]) + + output_scaler = None + if scaler_info["output_scaler"] is not None: + output_scaler = OffsetScaler.from_dict(scaler_info["output_scaler"]) + onnx_surrogate = ONNXSurrogate( + onnx_model, + input_labels=scaler_info["input_labels"], + output_labels=scaler_info["output_labels"], + input_bounds=scaler_info["input_bounds"], + input_scaler=input_scaler, + output_scaler=output_scaler, + ) + + m = ConcreteModel() + + output_vars = ["Calcite_ST"] + m.inputs = Var(test_inputs["vars"]) + + m.outputs = Var(test_outputs["vars"]) + m.surrogate = SurrogateBlock() + m.surrogate.build_model( + surrogate_object=onnx_surrogate, + input_vars=[m.inputs[input_var] for input_var in test_inputs["vars"]], + output_vars=[m.outputs[output_var] for output_var in test_outputs["vars"]], + formulation=ONNXSurrogate.Formulation.REDUCED_SPACE, + ) + for key in test_inputs["vars"]: + m.inputs[key].fix(test_inputs[key]) + + solver = SolverFactory("ipopt") + status = solver.solve(m, tee=True) + assert_optimal_termination(status) + assert pytest.approx(test_outputs["Calcite_ST"], rel=1e-3) == value( + m.outputs["Calcite_ST"] + ) + + +@pytest.mark.unit +@pytest.mark.skipif(not SolverFactory("ipopt").available(False), reason="no Ipopt") +def test_onnx_surrogate_load_and_save_from_file(): + ### + # Test 1->2 sigmoid + ### + _, _, test_inputs, test_outputs = 
load_onnx_model_data() + + onnx_surrogate = ONNXSurrogate.load_onnx_model( + onnx_model_location=os.path.join(this_file_dir(), "data", "onnx_models"), + model_name="net_st_net_5000_STM_100_s_2000000_60_5_tanh_1e-06_4096_tr_15481_Calcite_ST", + ) + with TempfileManager.new_context() as tf: + dname = tf.mkdtemp() + onnx_surrogate.save_to_folder(dname, "temp_model") + + loaded_onnx_surrogate = ONNXSurrogate.load_onnx_model( + onnx_model_location=dname, + model_name="temp_model", + ) + assert not os.path.isdir(dname) + m = ConcreteModel() + + output_vars = ["Calcite_ST"] + m.inputs = Var(test_inputs["vars"]) + + m.outputs = Var(test_outputs["vars"]) + m.surrogate = SurrogateBlock() + m.surrogate.build_model( + surrogate_object=loaded_onnx_surrogate, + input_vars=[m.inputs[input_var] for input_var in test_inputs["vars"]], + output_vars=[m.outputs[output_var] for output_var in test_outputs["vars"]], + formulation=ONNXSurrogate.Formulation.REDUCED_SPACE, + ) + for key in test_inputs["vars"]: + m.inputs[key].fix(test_inputs[key]) + + solver = SolverFactory("ipopt") + status = solver.solve(m, tee=True) + assert_optimal_termination(status) + assert pytest.approx(test_outputs["Calcite_ST"], rel=1e-3) == value( + m.outputs["Calcite_ST"] + ) diff --git a/idaes/core/util/misc.py b/idaes/core/util/misc.py index 5daa73676d..78185256cd 100644 --- a/idaes/core/util/misc.py +++ b/idaes/core/util/misc.py @@ -15,9 +15,12 @@ This module contains miscellaneous utility functions for use in IDAES models. """ from enum import Enum +import sys import pyomo.environ as pyo from pyomo.common.config import ConfigBlock +from pyomo.core.expr.visitor import _ToStringVisitor +from pyomo.core.base.expression import ExpressionData import idaes.logger as idaeslog @@ -177,3 +180,66 @@ class StrEnum(str, Enum): def __str__(self): return str(self.value) + + +class _ToExprStringVisitor(_ToStringVisitor): + """ + Derived version of the Pyomo _ToStringVisitor class + which checks for named Expressions in the expression tree + and prints their name instead of expanding the expression tree. + """ + + def visiting_potential_leaf(self, node): + # Check if node is a named Expression + if isinstance(node, ExpressionData): + # If it is, return the name of the Expression + return True, node.name + + # Otherwise, continue descending as normal + return super().visiting_potential_leaf(node) + + +def compact_expression_to_string(expr): + """Return a compact string representation of an expression. + + Unlike the normal Pyomo string representations, this function will + identify any Expressions that appear within the expression tree and + print the name of these rather than expanding the expression tree. + + Args: + expr: ExpressionBase. The root node of an expression tree. + + Returns: + A string representation for the expression. + """ + # Create and execute the visitor pattern + # + visitor = _ToExprStringVisitor(verbose=False, smap=None) + return visitor.dfs_postorder_stack(expr) + + +def print_compact_form(expr, stream=None): + """ + Writes a compact string representation of the given component or + expression to the specified stream. + + Unlike the normal Pyomo string representations, this function will + identify any Expressions that appear within the expression tree and + print the name of these rather than expanding the expression tree. + + Args: + expr: component or expression to print + stream: StringIO object to write to. Default is stdout. 
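+
+        For example, if m.e = Expression(expr=m.x + m.y) is used in
+        m.c = Constraint(expr=m.e <= 1), then print_compact_form(m.c)
+        prints the named Expression by name (roughly "e <= 1") instead
+        of expanding it to "x + y <= 1".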
+ + Returns: + None + """ + if stream is None: + stream = sys.stdout + + if hasattr(expr, "expr"): + # We have a Constraint or Expression + # We want to print the expression, not the object name + expr = expr.expr + + stream.write(compact_expression_to_string(expr)) diff --git a/idaes/core/util/model_diagnostics.py b/idaes/core/util/model_diagnostics.py index c7f5335c26..f701677d47 100644 --- a/idaes/core/util/model_diagnostics.py +++ b/idaes/core/util/model_diagnostics.py @@ -20,10 +20,11 @@ from operator import itemgetter import sys from inspect import signature -from math import log, isclose, inf, isfinite +from math import log, log10, isclose, inf, isfinite import json from typing import List import logging +from itertools import combinations, chain import numpy as np from scipy.linalg import svd @@ -35,6 +36,7 @@ Integers, Block, check_optimal_termination, + ComponentMap, ConcreteModel, Constraint, Expression, @@ -58,6 +60,7 @@ from pyomo.core.base.block import BlockData from pyomo.core.base.var import VarData from pyomo.core.base.constraint import ConstraintData +from pyomo.core.base.expression import ExpressionData from pyomo.repn.standard_repn import ( # pylint: disable=no-name-in-module generate_standard_repn, ) @@ -67,6 +70,8 @@ ConfigValue, document_kwargs_from_configdict, PositiveInt, + NonNegativeFloat, + NonNegativeInt, ) from pyomo.util.check_units import identify_inconsistent_units from pyomo.contrib.incidence_analysis import IncidenceGraphInterface @@ -78,8 +83,12 @@ from pyomo.common.deprecation import deprecation_warning from pyomo.common.errors import PyomoException from pyomo.common.tempfiles import TempfileManager +from pyomo.core import expr as EXPR +from pyomo.common.numeric_types import native_types +from pyomo.core.base.units_container import _PyomoUnit from idaes.core.solvers.get_solver import get_solver +from idaes.core.util.misc import compact_expression_to_string from idaes.core.util.model_statistics import ( activated_blocks_set, deactivated_blocks_set, @@ -180,7 +189,6 @@ def svd_sparse(jacobian, number_singular_values): """ u, s, vT = svds(jacobian, k=number_singular_values, which="SM") - print(u, s, vT, number_singular_values) return u, s, vT.transpose() @@ -189,7 +197,7 @@ def svd_sparse(jacobian, number_singular_values): "variable_bounds_absolute_tolerance", ConfigValue( default=1e-4, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance for considering a variable to be close " "to its bounds.", ), @@ -198,7 +206,7 @@ def svd_sparse(jacobian, number_singular_values): "variable_bounds_relative_tolerance", ConfigValue( default=1e-4, - domain=float, + domain=NonNegativeFloat, description="Relative tolerance for considering a variable to be close " "to its bounds.", ), @@ -207,7 +215,7 @@ def svd_sparse(jacobian, number_singular_values): "variable_bounds_violation_tolerance", ConfigValue( default=0, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance for considering a variable to violate its bounds.", doc="Absolute tolerance for considering a variable to violate its bounds. 
" "Some solvers relax bounds on variables thus allowing a small violation to be " @@ -218,15 +226,47 @@ def svd_sparse(jacobian, number_singular_values): "constraint_residual_tolerance", ConfigValue( default=1e-5, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance to use when checking constraint residuals.", ), ) +CONFIG.declare( + "constraint_term_mismatch_tolerance", + ConfigValue( + default=1e6, + domain=NonNegativeFloat, + description="Magnitude difference to use when checking for mismatched additive terms in constraints.", + ), +) +CONFIG.declare( + "constraint_term_cancellation_tolerance", + ConfigValue( + default=1e-4, + domain=NonNegativeFloat, + description="Absolute tolerance to use when checking for canceling additive terms in constraints.", + ), +) +CONFIG.declare( + "max_canceling_terms", + ConfigValue( + default=5, + domain=NonNegativeInt, + description="Maximum number of terms to consider when looking for canceling combinations in expressions.", + ), +) +CONFIG.declare( + "constraint_term_zero_tolerance", + ConfigValue( + default=1e-10, + domain=NonNegativeFloat, + description="Absolute tolerance to use when determining if a constraint term is equal to zero.", + ), +) CONFIG.declare( "variable_large_value_tolerance", ConfigValue( default=1e4, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance for considering a value to be large.", ), ) @@ -234,7 +274,7 @@ def svd_sparse(jacobian, number_singular_values): "variable_small_value_tolerance", ConfigValue( default=1e-4, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance for considering a value to be small.", ), ) @@ -242,7 +282,7 @@ def svd_sparse(jacobian, number_singular_values): "variable_zero_value_tolerance", ConfigValue( default=1e-8, - domain=float, + domain=NonNegativeFloat, description="Absolute tolerance for considering a value to be near to zero.", ), ) @@ -250,7 +290,7 @@ def svd_sparse(jacobian, number_singular_values): "jacobian_large_value_caution", ConfigValue( default=1e4, - domain=float, + domain=NonNegativeFloat, description="Tolerance for raising a caution for large Jacobian values.", ), ) @@ -258,7 +298,7 @@ def svd_sparse(jacobian, number_singular_values): "jacobian_large_value_warning", ConfigValue( default=1e8, - domain=float, + domain=NonNegativeFloat, description="Tolerance for raising a warning for large Jacobian values.", ), ) @@ -266,7 +306,7 @@ def svd_sparse(jacobian, number_singular_values): "jacobian_small_value_caution", ConfigValue( default=1e-4, - domain=float, + domain=NonNegativeFloat, description="Tolerance for raising a caution for small Jacobian values.", ), ) @@ -274,7 +314,7 @@ def svd_sparse(jacobian, number_singular_values): "jacobian_small_value_warning", ConfigValue( default=1e-8, - domain=float, + domain=NonNegativeFloat, description="Tolerance for raising a warning for small Jacobian values.", ), ) @@ -290,7 +330,7 @@ def svd_sparse(jacobian, number_singular_values): "parallel_component_tolerance", ConfigValue( default=1e-8, - domain=float, + domain=NonNegativeFloat, description="Tolerance for identifying near-parallel Jacobian rows/columns", ), ) @@ -298,7 +338,7 @@ def svd_sparse(jacobian, number_singular_values): "absolute_feasibility_tolerance", ConfigValue( default=1e-6, - domain=float, + domain=NonNegativeFloat, description="Feasibility tolerance for identifying infeasible constraints and bounds", ), ) @@ -336,7 +376,7 @@ def svd_sparse(jacobian, number_singular_values): "singular_value_tolerance", 
ConfigValue( default=1e-6, - domain=float, + domain=NonNegativeFloat, description="Tolerance for defining a small singular value", ), ) @@ -344,7 +384,7 @@ def svd_sparse(jacobian, number_singular_values): "size_cutoff_in_singular_vector", ConfigValue( default=0.1, - domain=float, + domain=NonNegativeFloat, description="Size below which to ignore constraints and variables in " "the singular vector", ), @@ -371,7 +411,7 @@ def svd_sparse(jacobian, number_singular_values): "M", # TODO: Need better name ConfigValue( default=1e5, - domain=float, + domain=NonNegativeFloat, description="Maximum value for nu in MILP models.", ), ) @@ -379,7 +419,7 @@ def svd_sparse(jacobian, number_singular_values): "m_small", # TODO: Need better name ConfigValue( default=1e-5, - domain=float, + domain=NonNegativeFloat, description="Smallest value for nu to be considered non-zero in MILP models.", ), ) @@ -387,7 +427,7 @@ def svd_sparse(jacobian, number_singular_values): "trivial_constraint_tolerance", ConfigValue( default=1e-6, - domain=float, + domain=NonNegativeFloat, description="Tolerance for identifying non-zero rows in Jacobian.", ), ) @@ -1052,6 +1092,239 @@ def display_near_parallel_variables(self, stream=None): # TODO: Block triangularization analysis # Number and size of blocks, polynomial degree of 1x1 blocks, simple pivot check of moderate sized sub-blocks? + def _collect_constraint_mismatches(self, descend_into=True): + """ + Call ConstraintTermAnalysisVisitor on all Constraints in model to walk expression + tree and collect any instances of sum expressions with mismatched terms or potential + cancellations. + + Args: + descend_into: whether to descend_into child_blocks + + Returns: + List of strings summarising constraints with mismatched terms + List of strings summarising constraints with cancellations + List of strings with constraint names where constraint contains no free variables + """ + walker = ConstraintTermAnalysisVisitor( + term_mismatch_tolerance=self.config.constraint_term_mismatch_tolerance, + term_cancellation_tolerance=self.config.constraint_term_cancellation_tolerance, + term_zero_tolerance=self.config.constraint_term_zero_tolerance, + # for the high level summary, we only need to know if there are any cancellations, + # but don't need to find all of them + max_cancellations_per_node=1, + max_canceling_terms=self.config.max_canceling_terms, + ) + + mismatch = [] + cancellation = [] + constant = [] + + for c in self._model.component_data_objects( + Constraint, descend_into=descend_into + ): + _, expr_mismatch, expr_cancellation, expr_constant, _ = ( + walker.walk_expression(c.expr) + ) + + if len(expr_mismatch) > 0: + mismatch.append(f"{c.name}: {len(expr_mismatch)} mismatched term(s)") + + if len(expr_cancellation) > 0: + cancellation.append( + f"{c.name}: {len(expr_cancellation)} potential canceling term(s)" + ) + + if expr_constant: + constant.append(c.name) + + return mismatch, cancellation, constant + + def display_constraints_with_mismatched_terms(self, stream=None): + """ + Display constraints in model which contain additive terms of significantly different magnitude. 
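+
+        For example, at a point where y and z are both of order 1, the
+        constraint x == y + 1e-8*z contains additive terms roughly eight
+        orders of magnitude apart and would be reported here (the threshold
+        is the constraint_term_mismatch_tolerance option, default 1e6).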
+
+        Args:
+            stream: I/O object to write report to (default = stdout)
+
+        Returns:
+            None
+
+        """
+        if stream is None:
+            stream = sys.stdout
+
+        mismatch, _, _ = self._collect_constraint_mismatches()
+
+        # Write the output
+        _write_report_section(
+            stream=stream,
+            lines_list=mismatch,
+            title="The following constraints have mismatched terms:",
+            end_line="Call display_problematic_constraint_terms(constraint) for more information.",
+            header="=",
+            footer="=",
+        )
+
+    def display_constraints_with_canceling_terms(self, stream=None):
+        """
+        Display constraints in model which contain additive terms which potentially cancel each other.
+
+        Note that this method looks at the current state of the constraint, and will flag terms as
+        canceling if you have a form A == B + C where C is significantly smaller than A and B. In some
+        cases this behavior is intended, as C is a correction term which happens to be very
+        small at the current state. However, you should review these constraints to determine whether
+        the correction term is important for the situation you are modeling and consider removing the
+        term if it will never be significant.
+
+        Args:
+            stream: I/O object to write report to (default = stdout)
+
+        Returns:
+            None
+
+        """
+        if stream is None:
+            stream = sys.stdout
+
+        _, canceling, _ = self._collect_constraint_mismatches()
+
+        # Write the output
+        _write_report_section(
+            stream=stream,
+            lines_list=canceling,
+            title="The following constraints have canceling terms:",
+            end_line="Call display_problematic_constraint_terms(constraint) for more information.",
+            header="=",
+            footer="=",
+        )
+
+    def display_problematic_constraint_terms(
+        self, constraint, max_cancellations: int = 5, stream=None
+    ):
+        """
+        Display a summary of potentially problematic terms in a given constraint.
+
+        Note that this method looks at the current state of the constraint, and will flag terms as
+        canceling if you have a form A == B + C where C is significantly smaller than A and B. In some
+        cases this behavior is intended, as C is a correction term which happens to be very
+        small at the current state. However, you should review these constraints to determine whether
+        the correction term is important for the situation you are modeling and consider removing the
+        term if it will never be significant.
+
+        Args:
+            constraint: ConstraintData object to be examined
+            max_cancellations: maximum number of cancellations per node before termination.
+                None = find all cancellations.
+            stream: I/O object to write report to (default = stdout)
+
+        Returns:
+            None
+
+        """
+        if stream is None:
+            stream = sys.stdout
+
+        # Check that constraint is of correct type to give useful error message
+        if not isinstance(constraint, ConstraintData):
+            # Wrong type, check if it is an indexed constraint
+            if isinstance(constraint, Constraint):
+                raise TypeError(
+                    f"{constraint.name} is an IndexedConstraint. Please provide "
+                    f"an individual element of {constraint.name} (ConstraintData) "
+                    "to be examined for problematic terms."
+                )
+            else:
+                # Not a constraint
+                raise TypeError(
+                    f"{constraint.name} is not an instance of a Pyomo Constraint."
+ ) + + walker = ConstraintTermAnalysisVisitor( + term_mismatch_tolerance=self.config.constraint_term_mismatch_tolerance, + term_cancellation_tolerance=self.config.constraint_term_cancellation_tolerance, + term_zero_tolerance=self.config.constraint_term_zero_tolerance, + max_cancellations_per_node=max_cancellations, + max_canceling_terms=self.config.max_canceling_terms, + ) + + _, expr_mismatch, expr_cancellation, _, tripped = walker.walk_expression( + constraint.expr + ) + + # Combine mismatches and cancellations into a summary list + issues = [] + for k, v in expr_mismatch.items(): + tag = " " + if isinstance(k, ExpressionData): + # For clarity, if the problem node is a named Expression, note this in output + tag = " Expression " + # Want to show full expression node plus largest and smallest magnitudes + issues.append( + f"Mismatch in{tag}{compact_expression_to_string(k)} (Max {v[0]}, Min {v[1]})" + ) + # Collect summary of cancelling terms for user + # Walker gives us back a list of nodes with cancelling terms + for k, v in expr_cancellation.items(): + # Each node may have multiple cancellations, these are given as separate tuples + tag = " " + if isinstance(k, ExpressionData): + # For clarity, if the problem node is a named Expression, note this in output + tag = " Expression " + for i in v: + # For each cancellation, iterate over contributing terms and write a summary + terms = "" + for j in i: + if len(terms) > 0: + terms += ", " + # +1 to switch from 0-index to 1-index + terms += f"{j[0]+1} ({j[1]})" + issues.append( + f"Cancellation in{tag}{compact_expression_to_string(k)}. Terms {terms}" + ) + + if tripped: + end_line = ( + f"Number of canceling terms per node limited to {max_cancellations}." + ) + else: + end_line = None + + # Write the output + _write_report_section( + stream=stream, + lines_list=issues, + title=f"The following terms in {constraint.name} are potentially problematic:", + end_line=end_line, + header="=", + footer="=", + ) + + def display_constraints_with_no_free_variables(self, stream=None): + """ + Display constraints in model which contain no free variables. 
+ + Args: + stream: I/O object to write report to (default = stdout) + + Returns: + None + + """ + if stream is None: + stream = sys.stdout + + _, _, constant = self._collect_constraint_mismatches() + + # Write the output + _write_report_section( + stream=stream, + lines_list=constant, + title="The following constraints have no free variables:", + header="=", + footer="=", + ) + def _collect_structural_warnings( self, ignore_evaluation_errors=False, ignore_unit_consistency=False ): @@ -1335,6 +1608,28 @@ def _collect_numerical_cautions(self, jac=None, nlp=None): cstring = "Variable" cautions.append(f"Caution: {len(none_value)} {cstring} with None value") + # Constraints with possible ill-posed terms + mismatch, cancellation, constant = self._collect_constraint_mismatches() + if len(mismatch) > 0: + cstring = "Constraints" + if len(mismatch) == 1: + cstring = "Constraint" + cautions.append(f"Caution: {len(mismatch)} {cstring} with mismatched terms") + if len(cancellation) > 0: + cstring = "Constraints" + if len(cancellation) == 1: + cstring = "Constraint" + cautions.append( + f"Caution: {len(cancellation)} {cstring} with potential cancellation of terms" + ) + if len(constant) > 0: + cstring = "Constraints" + if len(constant) == 1: + cstring = "Constraint" + cautions.append( + f"Caution: {len(constant)} {cstring} with no free variables" + ) + # Extreme Jacobian rows and columns jac_col = extreme_jacobian_columns( jac=jac, @@ -2777,8 +3072,6 @@ def eq_degenerate(m_dh, v): # This variable does not appear in any constraint return Constraint.Skip - m_dh.pprint() - m_dh.degenerate = Constraint(m_dh.V, rule=eq_degenerate) # When y_pos = 1, nu >= m_small @@ -4122,3 +4415,457 @@ def _collect_model_statistics(model): ) return stats + + +class ConstraintTermAnalysisVisitor(EXPR.StreamBasedExpressionVisitor): + """ + Expression walker for checking Constraints for problematic terms. + + This walker will walk the expression and look for summation terms + with mismatched magnitudes or potential cancellations. + + Args: + term_mismatch_tolerance: tolerance to use when determining mismatched + terms + term_cancellation_tolerance: tolerance to use when identifying + possible cancellation of terms + term_zero_tolerance: tolerance for considering terms equal to zero + max_canceling_terms: maximum number of terms to consider when looking + for canceling combinations (None = consider all possible combinations) + max_cancellations_per_node: maximum number of cancellations to collect + for a single node. 
Collection will terminate when this many cancellations + have been identified (None = collect all cancellations) + + Returns: + list of values for top-level summation terms + list of terms with mismatched magnitudes + list of terms with potential cancellations + bool indicating whether expression is a constant + """ + + def __init__( + self, + term_mismatch_tolerance: float = 1e6, + term_cancellation_tolerance: float = 1e-4, + term_zero_tolerance: float = 1e-10, + max_canceling_terms: int = 4, + max_cancellations_per_node: int = 5, + ): + super().__init__() + + # Tolerance attributes + self._log_mm_tol = log10(term_mismatch_tolerance) + self._sum_tol = term_cancellation_tolerance + self._zero_tolerance = term_zero_tolerance + self._max_canceling_terms = max_canceling_terms + self._max_cancellations_per_node = max_cancellations_per_node + + # Placeholders for collecting results + self.canceling_terms = ComponentMap() + self.mismatched_terms = ComponentMap() + + # Flag for if cancellation collection hit limit + self._cancellation_tripped = False + + def _get_value_for_sum_subexpression(self, child_data): + # child_data is a tuple, with the 0-th element being the node values + if isinstance(child_data[0][0], str): + # Values may be a list containing a string in some cases (e.g. external functions) + # Return the string in this case + return child_data[0][0] + return sum(i for i in child_data[0]) + + def _generate_combinations(self, inputs, equality=False): + # We want to test all combinations of terms for cancellation + + # The number of combinations we check depends on whether this is an (in)equality + # expression or a sum node deeper in the expression tree. + # We expect (in)equalities to generally sum to 0 (0 == expr) thus we want to + # check if any subset of the sum terms sum to zero (i.e. are any terms unnecessary). + # For other sum nodes, we need to check for any combination of terms. 
+ + # Maximum number of terms to include in combinations + max_comb = len(inputs) + if equality: + # Subtract 1 if (in)equality node + max_comb += -1 + # We also have a limit on the maximum number of terms to consider + if self._max_canceling_terms is not None: + max_comb = min(max_comb, self._max_canceling_terms) + + # Single terms cannot cancel, thus we want all combinations of length 2 to max terms + # Note the need for +1 due to way range works + combo_range = range(2, max_comb + 1) + + # Collect combinations of terms in an iterator + # In order to identify the terms in each cancellation, we will pair each value + # with its index in the input set as a tuple using enumerate + for i in chain.from_iterable( + combinations(enumerate(inputs), r) for r in combo_range + ): + # Yield each combination in the set + yield i + + def _check_sum_cancellations(self, values_list, equality=False): + # First, strip any terms with value 0 as they do not contribute to cancellation + # We do this to keep the number of possible combinations as small as possible + stripped = [i for i in values_list if abs(i) >= self._zero_tolerance] + + cancellations = [] + + if len(stripped) == 0: + # If the stripped list is empty, there are no non-zero terms + # We can stop here and return False as there are no possible cancellations + return cancellations + + # For scaling of tolerance, we want to compare to the largest absolute value of + # the input values + max_value = abs(max(stripped, key=abs)) + + for i in self._generate_combinations(stripped, equality): + # Generate combinations will return a list of combinations of input terms + # each element of the list will be a 2-tuple representing a term in the + # input list with the first value being the position in the input set and + # the second being the value + + # Check if the sum of values in the combination are below tolerance + if abs(sum(j[1] for j in i)) <= self._sum_tol * max_value: + # If so, record combination as canceling + cancellations.append(i) + + # Terminate loop if we have reached the max cancellations to collect + if len(cancellations) >= self._max_cancellations_per_node: + self._cancellation_tripped = True + break + + return cancellations + + def _perform_checks(self, node, child_data): + # Perform checks for problematic expressions + # First, need to check to see if any child data is a list + # This indicates a sum expression + const = True + + for d in child_data: + # We will check for canceling terms here, rather than the sum itself, to handle special cases + # We want to look for cases where a sum term results in a value much smaller + # than the terms of the sum + # Each element of child_data is a tuple where the 0-th element is the node values + if isinstance(d[0][0], str): + # Values may be a list containing a string in some cases (e.g. 
external functions)
+                # Skip if this is the case
+                pass
+            else:
+                for c in self._check_sum_cancellations(d[0]):
+                    if node in self.canceling_terms.keys():
+                        self.canceling_terms[node].append(c)
+                    else:
+                        self.canceling_terms[node] = [c]
+
+            # Expression is not constant if any child is not constant
+            # Element 1 is a bool indicating if the child node is constant
+            if not d[1]:
+                const = False
+
+        # Return any problematic terms found
+        return const
+
+    def _check_base_type(self, node):
+        if isinstance(node, VarData):
+            const = node.fixed
+        else:
+            const = True
+        return [value(node)], const, False
+
+    def _check_equality_expression(self, node, child_data):
+        # (In)equality expressions are a special case of sum expressions
+        # child_data has two elements; 0 is the LHS of the (in)equality and 1 is the RHS
+        # Each of these then contains three elements; 0 is a list of values for the sum components,
+        # 1 is a bool indicating if the child node term is constant, and 2 is a bool indicating if
+        # the child node is a sum expression.
+
+        # First, to check for cancellation we need to negate one side of the expression
+        # mdata will contain the new set of child_data with negated values
+        mdata = []
+        # child_data[0][0] has the values of the LHS of the (in)equality, and we will negate these
+        vals = []
+        for j in child_data[0][0]:
+            vals.append(-j)
+        # Append the negated values along with whether the node is constant to mdata
+        mdata.append((vals, child_data[0][1]))
+        # child_data[1] is the RHS, so we take this as it appears and append to mdata
+        mdata.append(child_data[1])
+
+        # Next, call the method to check the sum expression
+        vals, const, _ = self._check_sum_expression(node, mdata)
+
+        # Next, we need to check for canceling terms.
+        # child_data[x][2] indicates if a node is a sum expression or not
+        if not child_data[0][2] and not child_data[1][2]:
+            # If both sides are not sum expressions, we have the form a == b
+            # Simple linking constraints do not need to be checked
+            pass
+        # Next, we can ignore any term that has already been flagged as mismatched
+        elif node in self.mismatched_terms.keys():
+            pass
+        # We can also ignore any case where one side of the (in)equality is constant
+        # I.e. if either mdata[x][1] is True
+        elif any(d[1] for d in mdata):
+            pass
+        else:
+            # Check for cancellation
+            # First, collect terms from both sides
+            # Note: outer loop comes first in list comprehension
+            t = [i for d in mdata for i in d[0]]
+
+            # Then check for cancellations
+            for c in self._check_sum_cancellations(t, equality=True):
+                if node in self.canceling_terms.keys():
+                    self.canceling_terms[node].append(c)
+                else:
+                    self.canceling_terms[node] = [c]
+
+        return vals, const, False
+
+    def _check_product_expr(self, node, child_data):
+        # We expect child_data to be a tuple of len 2 (a*b)
+        # If both or neither a and b are sums (xor), handle like other expressions
+        if not child_data[0][2] ^ child_data[1][2]:
+            return self._check_general_expr(node, child_data)
+        else:
+            # Here we have the case of a sum and a multiplier
+            # For this case, we will make the result look like a sum with the
+            # multiplier applied
+            # This is important for scaling of the sum terms, as cancellation should be
+            # checked on the scaled value (e.g. 10*(x - y) is treated as the term
+            # list [10*x, -10*y], so cancellation is judged at the scaled magnitude)
+
+            # First, check if both terms are constants - if so we can just get the value of
+            # this node and move on.
+            if child_data[0][1] and child_data[1][1]:
+                return self._check_general_expr(node, child_data)
+
+            # The length of the values (child_data[i][0]) of the multiplier will be 1
+            # We can just iterate over all terms in both value lists and multiply
+            # each pair
+            vals = [i * j for i in child_data[0][0] for j in child_data[1][0]]
+
+            # Return the list of values, not constant, is a sum expression (apparent)
+            return vals, False, True
+
+    def _check_div_expr(self, node, child_data):
+        # We expect child_data to be a tuple of len 2 (a/b)
+        # If the numerator is not a sum, handle like general expression
+        # child_data[0] is numerator, child_data[1] is denominator
+        if not child_data[0][2]:
+            return self._check_general_expr(node, child_data)
+        else:
+            # If the numerator is a sum, we will treat this as if the division
+            # were applied to each term in the numerator separately
+            # This is important for scaling of the sum terms, as cancellation should be
+            # checked on the scaled value
+
+            # First, check if the numerator is constant - if so we can just get the value of
+            # this node and move on.
+            if child_data[0][1]:
+                return self._check_general_expr(node, child_data)
+
+            # Next, we need to get the value of the denominator
+            denom = self._get_value_for_sum_subexpression(child_data[1])
+
+            try:
+                vals = [i / denom for i in child_data[0][0]]
+            except ZeroDivisionError:
+                raise ZeroDivisionError(
+                    f"Error in ConstraintTermAnalysisVisitor: found division with denominator of 0 "
+                    f"({str(node)})."
+                )
+
+            # Return the list of values, not constant, is a sum expression (apparent)
+            return vals, False, True
+
+    def _check_negation_expr(self, node, child_data):
+        # Here we need to defer checking of cancellations too, due to the different ways
+        # these can appear in an expression.
+        # We will simply negate all values in the child node values (child_data[0][0])
+        # and pass on the rest.
+        vals = [-i for i in child_data[0][0]]
+
+        # Return the negated values along with the child's constant and
+        # sum-expression flags
+        return vals, child_data[0][1], child_data[0][2]
+
+    def _check_general_expr(self, node, child_data):
+        const = self._perform_checks(node, child_data)
+
+        try:
+            # pylint: disable-next=protected-access
+            val = node._apply_operation(
+                list(map(self._get_value_for_sum_subexpression, child_data))
+            )
+        except ValueError:
+            raise ValueError(
+                f"Error in ConstraintTermAnalysisVisitor: error evaluating {str(node)}."
+            )
+        except ZeroDivisionError:
+            raise ZeroDivisionError(
+                f"Error in ConstraintTermAnalysisVisitor: found division with denominator of 0 "
+                f"({str(node)})."
+            )
+        except Exception as err:
+            # Catch and re-raise any other error that occurs
+            _log.exception(f"Unexpected {err=}, {type(err)=}")
+            raise
+
+        return [val], const, False
+
+    def _check_other_expression(self, node, child_data):
+        const = self._perform_checks(node, child_data)
+
+        # First, need to get value of input terms, which may be sub-expressions
+        input_mag = []
+        for i in child_data:
+            input_mag.append(self._get_value_for_sum_subexpression(i))
+
+        # Next, create a copy of the function with expected magnitudes as inputs
+        newfunc = node.create_node_with_local_data(input_mag)
+
+        # Evaluate new function and return the value along with check results
+        return [value(newfunc)], const, False
+
+    def _check_ranged_expression(self, node, child_data):
+        # child_data should have 3 elements: LHS, middle term, and RHS
+        lhs_vals, lhs_const, _ = self._check_equality_expression(node, child_data[:2])
+        rhs_vals, rhs_const, _ = self._check_equality_expression(node, child_data[1:])
+
+        # Constant is a bit vague in terms of ranged expressions.
+        # We will call the term constant only if all parts are constant
+        const = lhs_const and rhs_const
+
+        # For values, we need to avoid double counting the middle term
+        # Also for sign convention, we will negate the outer terms
+        vals = lhs_vals + [-rhs_vals[1]]
+
+        return vals, const, False
+
+    def _check_sum_expression(self, node, child_data):
+        # Sum expressions need special handling
+        # For sums, collect all child values into a list
+        vals = []
+        # We will check for cancellation in this node at the next level
+        # Pyomo is generally good at simplifying compound sums
+        const = True
+        # Collect data from child nodes
+        for d in child_data:
+            vals.append(self._get_value_for_sum_subexpression(d))
+
+            # Expression is not constant if any child is not constant
+            # Element 1 is a bool indicating if node is constant
+            if not d[1]:
+                const = False
+
+        # Check for mismatched terms
+        if len(vals) > 1:
+            absvals = [abs(v) for v in vals if abs(v) >= self._zero_tolerance]
+            if len(absvals) > 0:
+                vl = max(absvals)
+                vs = min(absvals)
+                if vl != vs:
+                    if vs == 0:
+                        diff = log10(vl)
+                    else:
+                        diff = log10(vl / vs)
+
+                    if diff >= self._log_mm_tol:
+                        self.mismatched_terms[node] = (vl, vs)
+
+        return vals, const, True
+
+    node_type_method_map = {
+        EXPR.EqualityExpression: _check_equality_expression,
+        EXPR.InequalityExpression: _check_equality_expression,
+        EXPR.RangedExpression: _check_ranged_expression,
+        EXPR.SumExpression: _check_sum_expression,
+        EXPR.NPV_SumExpression: _check_sum_expression,
+        EXPR.ProductExpression: _check_product_expr,
+        EXPR.MonomialTermExpression: _check_product_expr,
+        EXPR.NPV_ProductExpression: _check_product_expr,
+        EXPR.DivisionExpression: _check_div_expr,
+        EXPR.NPV_DivisionExpression: _check_div_expr,
+        EXPR.PowExpression: _check_general_expr,
+        EXPR.NPV_PowExpression: _check_general_expr,
+        EXPR.NegationExpression: _check_negation_expr,
+        EXPR.NPV_NegationExpression: _check_negation_expr,
+        EXPR.AbsExpression: _check_general_expr,
+        EXPR.NPV_AbsExpression: _check_general_expr,
+        EXPR.UnaryFunctionExpression: _check_general_expr,
+        EXPR.NPV_UnaryFunctionExpression: _check_general_expr,
+        EXPR.Expr_ifExpression: _check_other_expression,
+        EXPR.ExternalFunctionExpression: _check_other_expression,
+        EXPR.NPV_ExternalFunctionExpression: _check_other_expression,
+        EXPR.LinearExpression: _check_sum_expression,
+    }
+
+    def exitNode(self, node, data):
+        """
+        Method to call when exiting node to check for potential issues.
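+
+        Returns a 3-tuple: a list of the node's values, a bool indicating
+        whether the node is constant, and a bool indicating whether the node
+        is an (actual or apparent) sum expression.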
+ """ + # Return [node values], constant (bool), sum expression (bool) + # first check if the node is a leaf + nodetype = type(node) + + if nodetype in native_types: + return [node], True, False + + node_func = self.node_type_method_map.get(nodetype, None) + if node_func is not None: + return node_func(self, node, data) + + if not node.is_expression_type(): + # this is a leaf, but not a native type + if nodetype is _PyomoUnit: + # Unit have no value, so return 1 as a placeholder + return [1], True, False + + # Var or Param + return self._check_base_type(node) + # might want to add other common types here + + # not a leaf - check if it is a named expression + if ( + hasattr(node, "is_named_expression_type") + and node.is_named_expression_type() + ): + return self._check_other_expression(node, data) + + raise TypeError( + f"An unhandled expression node type: {str(nodetype)} was encountered while " + f"analyzing constraint terms {str(node)}" + ) + + def walk_expression(self, expr): + """ + Main method to call to walk an expression and return analysis. + + Args: + expr - expression to be analyzed + + Returns: + list of values of top-level additive terms + ComponentSet containing any mismatched terms + ComponentSet containing any canceling terms + Bool indicating whether expression is a constant + """ + # Create new holders for collected terms + self.canceling_terms = ComponentMap() + self.mismatched_terms = ComponentMap() + + # Call parent walk_expression method + vals, const, _ = super().walk_expression(expr) + + # Return results + return ( + vals, + self.mismatched_terms, + self.canceling_terms, + const, + self._cancellation_tripped, + ) diff --git a/idaes/core/util/tests/test_misc.py b/idaes/core/util/tests/test_misc.py index 594b790e12..363fcd8df0 100644 --- a/idaes/core/util/tests/test_misc.py +++ b/idaes/core/util/tests/test_misc.py @@ -14,16 +14,19 @@ This module contains miscellaneous utility functions for use in IDAES models. 
""" +from io import StringIO import pytest import re -from pyomo.environ import ConcreteModel, Set, Block, Var, units +from pyomo.environ import ConcreteModel, Constraint, Expression, Set, Block, Var, units from pyomo.common.config import ConfigBlock from pyomo.core.base.units_container import UnitsError from idaes.core.util.misc import ( add_object_reference, set_param_from_config, + compact_expression_to_string, + print_compact_form, ) import idaes.logger as idaeslog @@ -361,3 +364,54 @@ def test_unitted_default(self, caplog): "b no units provided for parameter test_param - assuming " "default units" in caplog.text ) + + +class TestToExprStringVisitor: + @pytest.mark.unit + def test_compact_expression_to_string(self): + m = ConcreteModel() + m.v1 = Var() + m.v2 = Var() + m.v3 = Var() + + m.e1 = Expression(expr=m.v1 + m.v2) + m.c1 = Constraint(expr=0 == m.v3 * m.e1) + + str = compact_expression_to_string(m.c1.expr) + expected = "v3*e1 == 0" + + assert str == expected + + @pytest.mark.unit + def test_print_compact_form_expr(self): + m = ConcreteModel() + m.v1 = Var() + m.v2 = Var() + m.v3 = Var() + + m.e1 = Expression(expr=m.v1 + m.v2) + m.c1 = Constraint(expr=0 == m.v3 * m.e1) + + expected = "v3*e1 == 0" + + stream = StringIO() + print_compact_form(m.c1.expr, stream=stream) + + assert stream.getvalue() == expected + + @pytest.mark.unit + def test_print_compact_form_component(self): + m = ConcreteModel() + m.v1 = Var() + m.v2 = Var() + m.v3 = Var() + + m.e1 = Expression(expr=m.v1 + m.v2) + m.c1 = Constraint(expr=0 == m.v3 * m.e1) + + expected = "v3*e1 == 0" + + stream = StringIO() + print_compact_form(m.c1, stream=stream) + + assert stream.getvalue() == expected diff --git a/idaes/core/util/tests/test_model_diagnostics.py b/idaes/core/util/tests/test_model_diagnostics.py index ec5b671e34..e10f8a7c57 100644 --- a/idaes/core/util/tests/test_model_diagnostics.py +++ b/idaes/core/util/tests/test_model_diagnostics.py @@ -13,52 +13,67 @@ """ This module contains model diagnostic utility functions for use in IDAES (Pyomo) models. 
""" +from copy import deepcopy from io import StringIO import math -import numpy as np -import pytest -import re import os -from copy import deepcopy +import re +from unittest import TestCase +import numpy as np from pandas import DataFrame +import pytest from pyomo.environ import ( Block, ConcreteModel, Constraint, Expression, - log, - tan, - asin, - acos, - sqrt, + Expr_if, Objective, PositiveIntegers, Set, SolverFactory, - Suffix, - TransformationFactory, units, value, Var, assert_optimal_termination, Param, Integers, + exp, + log, + log10, + sin, + cos, + tan, + asin, + acos, + atan, + sqrt, + sinh, + cosh, + tanh, + asinh, + acosh, + atanh, ) from pyomo.common.collections import ComponentSet from pyomo.contrib.pynumero.asl import AmplInterface from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP from pyomo.common.fileutils import this_file_dir from pyomo.common.tempfiles import TempfileManager +from pyomo.core import expr as EXPR import idaes.core.util.scaling as iscale import idaes.logger as idaeslog from idaes.core.solvers import get_solver from idaes.core import FlowsheetBlock from idaes.core.util.testing import PhysicalParameterTestBlock -from unittest import TestCase - +from idaes.models.properties.modular_properties.eos.ceos_common import ( + cubic_roots_available, + CubicThermoExpressions, + CubicType as CubicEoS, +) from idaes.core.util.model_diagnostics import ( DiagnosticsToolbox, SVDToolbox, @@ -81,6 +96,7 @@ _collect_model_statistics, check_parallel_jacobian, compute_ill_conditioning_certificate, + ConstraintTermAnalysisVisitor, ) from idaes.core.util.parameter_sweep import ( SequentialSweepRunner, @@ -90,6 +106,7 @@ UniformSampling, ) from idaes.core.util.testing import _enable_scip_solver_for_testing +from idaes.models.properties import iapws95 __author__ = "Alex Dowling, Douglas Allan, Andrew Lee" @@ -999,6 +1016,232 @@ def test_display_near_parallel_variables(self): v1, v4 v2, v4 +==================================================================================== +""" + + assert stream.getvalue() == expected + + @pytest.mark.component + def test_collect_constraint_mismatches(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=3) + + # Constraint with no free variables + m.c1 = Constraint(expr=m.v1 == m.v2) + m.v1.fix() + m.v2.fix() + + # Constraint with mismatched terms + m.v3 = Var(initialize=10) + m.v4 = Var(initialize=10) + m.v5 = Var(initialize=1e-6) + m.c2 = Constraint(expr=m.v3 == m.v4 + m.v5) + + # Constraint with cancellation + m.v6 = Var(initialize=10) + m.c3 = Constraint(expr=m.v6 == 10 + m.v3 - m.v4) + + dt = DiagnosticsToolbox(model=m) + + mismatch, cancellation, constant = dt._collect_constraint_mismatches() + + assert mismatch == ["c2: 1 mismatched term(s)"] + assert cancellation == [ + "c2: 1 potential canceling term(s)", + "c3: 1 potential canceling term(s)", + ] + assert constant == ["c1"] + + @pytest.mark.component + def test_display_problematic_constraint_terms(self): + m = ConcreteModel() + + # Constraint with mismatched terms + m.v3 = Var(initialize=10) + m.v4 = Var(initialize=10) + m.v5 = Var(initialize=1e-6) + m.c2 = Constraint(expr=m.v3 == m.v4 + m.v5) + + dt = DiagnosticsToolbox(model=m) + + stream = StringIO() + dt.display_problematic_constraint_terms(m.c2, stream=stream) + + expected = """==================================================================================== +The following terms in c2 are potentially problematic: + + Mismatch in v4 + v5 (Max 10, Min 1e-06) + Cancellation in v3 == v4 + v5. 
Terms 1 (-10), 2 (10)
+
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_display_problematic_constraint_terms_limited(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=10)
+        m.v2 = Var(initialize=-10)
+        m.v4 = Var(initialize=10)
+
+        # Constraint with cancellation
+        m.v6 = Var(initialize=10)
+        m.c1 = Constraint(expr=m.v6 == 10 - m.v4 + m.v1 + m.v2)
+
+        dt = DiagnosticsToolbox(model=m)
+
+        stream = StringIO()
+        dt.display_problematic_constraint_terms(m.c1, stream=stream)
+
+        expected = """====================================================================================
+The following terms in c1 are potentially problematic:
+
+    Cancellation in v6 == 10 - v4 + v1 + v2. Terms 1 (-10), 2 (10)
+    Cancellation in v6 == 10 - v4 + v1 + v2. Terms 1 (-10), 4 (10)
+    Cancellation in v6 == 10 - v4 + v1 + v2. Terms 2 (10), 3 (-10)
+    Cancellation in v6 == 10 - v4 + v1 + v2. Terms 2 (10), 5 (-10)
+    Cancellation in v6 == 10 - v4 + v1 + v2. Terms 3 (-10), 4 (10)
+
+Number of canceling terms per node limited to 5.
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_display_problematic_constraint_terms_indexed_error(self):
+        m = ConcreteModel()
+        m.s = Set(initialize=[1, 2])
+        m.v1 = Var(m.s, initialize=2)
+        m.v2 = Var(m.s, initialize=3)
+
+        def _c_rule(b, i):
+            return b.v1[i] == b.v2[i]
+
+        m.c1 = Constraint(m.s, rule=_c_rule)
+
+        dt = DiagnosticsToolbox(model=m)
+
+        # Check for indexed constraints
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                "c1 is an IndexedConstraint. Please provide "
+                "an individual element of c1 (ConstraintData) "
+                "to be examined for problematic terms."
+            ),
+        ):
+            dt.display_problematic_constraint_terms(m.c1)
+
+        # Check for components that are not Constraints
+        with pytest.raises(
+            TypeError, match="v1 is not an instance of a Pyomo Constraint."
+        ):
+            dt.display_problematic_constraint_terms(m.v1)
+
+    @pytest.mark.component
+    def test_display_problematic_constraint_terms_named_expression(self):
+        m = ConcreteModel()
+
+        # Constraint with canceling terms inside a named expression
+        m.v1 = Var(initialize=10)
+        m.v2 = Var(initialize=10)
+        m.v3 = Var(initialize=1e-6)
+
+        m.e1 = Expression(expr=(m.v1 - m.v2))
+
+        m.c2 = Constraint(expr=m.v3 == m.e1**2)
+
+        dt = DiagnosticsToolbox(model=m)
+
+        stream = StringIO()
+        dt.display_problematic_constraint_terms(m.c2, stream=stream)
+
+        expected = """====================================================================================
+The following terms in c2 are potentially problematic:
+
+    Cancellation in Expression e1. Terms 1 (10), 2 (-10)
+
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_display_constraints_with_mismatched_terms(self):
+        m = ConcreteModel()
+        # Constraint with mismatched terms
+        m.v3 = Var(initialize=10)
+        m.v4 = Var(initialize=10)
+        m.v5 = Var(initialize=1e-6)
+        m.c2 = Constraint(expr=m.v3 == m.v4 + m.v5)
+
+        dt = DiagnosticsToolbox(model=m)
+
+        stream = StringIO()
+        dt.display_constraints_with_mismatched_terms(stream=stream)
+
+        expected = """====================================================================================
+The following constraints have mismatched terms:
+
+    c2: 1 mismatched term(s)
+
+Call display_problematic_constraint_terms(constraint) for more information.
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_display_constraints_with_canceling_terms(self):
+        m = ConcreteModel()
+        # Variables for the canceling constraint
+        m.v3 = Var(initialize=10)
+        m.v4 = Var(initialize=10)
+
+        # Constraint with cancellation
+        m.v6 = Var(initialize=10)
+        m.c3 = Constraint(expr=m.v6 == 10 + m.v3 - m.v4)
+
+        dt = DiagnosticsToolbox(model=m)
+
+        stream = StringIO()
+        dt.display_constraints_with_canceling_terms(stream=stream)
+
+        expected = """====================================================================================
+The following constraints have canceling terms:
+
+    c3: 1 potential canceling term(s)
+
+Call display_problematic_constraint_terms(constraint) for more information.
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_display_constraints_with_no_free_variables(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=2)
+        m.v2 = Var(initialize=3)
+
+        # Constraint with no free variables
+        m.c1 = Constraint(expr=m.v1 == m.v2)
+        m.v1.fix()
+        m.v2.fix()
+
+        dt = DiagnosticsToolbox(model=m)
+
+        stream = StringIO()
+        dt.display_constraints_with_no_free_variables(stream=stream)
+
+        expected = """====================================================================================
+The following constraints have no free variables:
+
+    c1
+
 ====================================================================================
 """
 
@@ -1151,6 +1394,7 @@ def test_collect_numerical_cautions(self, model):
         dt = DiagnosticsToolbox(model=model.b)
 
         cautions = dt._collect_numerical_cautions()
+
         assert len(cautions) == 5
         assert (
             "Caution: 2 Variables with value close to their bounds (abs=1.0E-04, rel=1.0E-04)"
@@ -1440,6 +1684,58 @@ def test_report_numerical_issues(self, model):
             compute_infeasibility_explanation()
             display_variables_at_or_outside_bounds()
 
+====================================================================================
+"""
+
+        assert stream.getvalue() == expected
+
+    @pytest.mark.component
+    def test_report_numerical_issues_cancellation(self):
+        model = ConcreteModel()
+
+        model.v1 = Var(initialize=1)
+        model.v2 = Var(initialize=2)
+        model.v3 = Var(initialize=1e-8)
+
+        # Non-problematic constraints
+        model.c1 = Constraint(expr=2 * model.v1 == model.v2)
+
+        # Problematic constraints
+        model.c10 = Constraint(expr=0 == exp(model.v1 - 0.5 * model.v2))
+        model.c11 = Constraint(expr=0 == model.v1 - 0.5 * model.v2 + model.v3)
+
+        dt = DiagnosticsToolbox(model=model)
+
+        stream = StringIO()
+        dt.report_numerical_issues(stream)
+
+        expected = """====================================================================================
+Model Statistics
+
+    Jacobian Condition Number: Undefined (Exactly Singular)
+
+------------------------------------------------------------------------------------
+3 WARNINGS
+
+    WARNING: 1 Constraint with large residuals (>1.0E-05)
+    WARNING: 1 pair of constraints are parallel (to tolerance 1.0E-08)
+    WARNING: 1 pair of variables are parallel (to tolerance 1.0E-08)
+
+------------------------------------------------------------------------------------
+3 Cautions
+
+    Caution: 1 Variable with value close to zero (tol=1.0E-08)
+    Caution: 1 Constraint with mismatched terms
+    Caution: 1 Constraint with potential cancellation of terms
+
+------------------------------------------------------------------------------------
+Suggested next steps:
+
+    display_constraints_with_large_residuals()
+    compute_infeasibility_explanation()
+    display_near_parallel_constraints()
+    display_near_parallel_variables()
+
 ====================================================================================
 """
 
         assert stream.getvalue() == expected
 
@@ -2267,7 +2563,6 @@ def test_build_model(self, model):
         ca = IpoptConvergenceAnalysis(model)
 
         clone = ca._build_model()
-        clone.pprint()
 
         assert clone is not model
         assert isinstance(clone.v1, Var)
@@ -4053,3 +4348,1057 @@ def test_output(self, model):
 Constraints / bounds in guards for stability:
 """
     assert expected in stream.getvalue()
+
+
+class TestConstraintTermAnalysisVisitor:
    @pytest.mark.unit
+    def test_sum_combinations(self):
+        # Check method to generate sums of all combinations of terms
+        # excludes single term sums
+        terms = [1, 2, 3, 4, 5]
+        visitor = ConstraintTermAnalysisVisitor(max_canceling_terms=None)
+        sums = [i for i in visitor._generate_combinations(terms)]
+
+        expected = [
+            ((0, 1), (1, 2)),
+            ((0, 1), (2, 3)),
+            ((0, 1), (3, 4)),
+            ((0, 1), (4, 5)),
+            ((1, 2), (2, 3)),
+            ((1, 2), (3, 4)),
+            ((1, 2), (4, 5)),
+            ((2, 3), (3, 4)),
+            ((2, 3), (4, 5)),
+            ((3, 4), (4, 5)),
+            ((0, 1), (1, 2), (2, 3)),
+            ((0, 1), (1, 2), (3, 4)),
+            ((0, 1), (1, 2), (4, 5)),
+            ((0, 1), (2, 3), (3, 4)),
+            ((0, 1), (2, 3), (4, 5)),
+            ((0, 1), (3, 4), (4, 5)),
+            ((1, 2), (2, 3), (3, 4)),
+            ((1, 2), (2, 3), (4, 5)),
+            ((1, 2), (3, 4), (4, 5)),
+            ((2, 3), (3, 4), (4, 5)),
+            ((0, 1), (1, 2), (2, 3), (3, 4)),
+            ((0, 1), (1, 2), (2, 3), (4, 5)),
+            ((0, 1), (1, 2), (3, 4), (4, 5)),
+            ((0, 1), (2, 3), (3, 4), (4, 5)),
+            ((1, 2), (2, 3), (3, 4), (4, 5)),
+            ((0, 1), (1, 2), (2, 3), (3, 4), (4, 5)),
+        ]
+
+        assert sums == expected
+
+    @pytest.mark.unit
+    def test_sum_combinations_limited_default(self):
+        # Check method to generate sums of all combinations of terms
+        # excludes single term sums
+        terms = [1, 2, 3, 4, 5]
+        visitor = ConstraintTermAnalysisVisitor()
+        sums = [i for i in visitor._generate_combinations(terms)]
+
+        expected = [
+            ((0, 1), (1, 2)),
+            ((0, 1), (2, 3)),
+            ((0, 1), (3, 4)),
+            ((0, 1), (4, 5)),
+            ((1, 2), (2, 3)),
+            ((1, 2), (3, 4)),
+            ((1, 2), (4, 5)),
+            ((2, 3), (3, 4)),
+            ((2, 3), (4, 5)),
+            ((3, 4), (4, 5)),
+            ((0, 1), (1, 2), (2, 3)),
+            ((0, 1), (1, 2), (3, 4)),
+            ((0, 1), (1, 2), (4, 5)),
+            ((0, 1), (2, 3), (3, 4)),
+            ((0, 1), (2, 3), (4, 5)),
+            ((0, 1), (3, 4), (4, 5)),
+            ((1, 2), (2, 3), (3, 4)),
+            ((1, 2), (2, 3), (4, 5)),
+            ((1, 2), (3, 4), (4, 5)),
+            ((2, 3), (3, 4), (4, 5)),
+            ((0, 1), (1, 2), (2, 3), (3, 4)),
+            ((0, 1), (1, 2), (2, 3), (4, 5)),
+            ((0, 1), (1, 2), (3, 4), (4, 5)),
+            ((0, 1), (2, 3), (3, 4), (4, 5)),
+            ((1, 2), (2, 3), (3, 4), (4, 5)),
+            # 5 term combination should be excluded
+        ]
+
+        assert sums == expected
+
+    @pytest.mark.unit
+    def test_check_sum_cancellations(self):
+        terms = [1, -2, 3, -4, 5]
+        visitor = ConstraintTermAnalysisVisitor()
+        cancellations = visitor._check_sum_cancellations(terms)
+
+        # We expect two canceling combinations
+        # Results should be a list with each entry being a tuple containing a
+        # canceling combination
+        # In turn, each element of the tuple should be a 2-tuple with the
+        # position of the term in the input list and its value
+        expected = [((0, 1), (2, 3), (3, -4)), ((0, 1), (1, -2), (3, -4), (4, 5))]
+
+        assert cancellations == expected
+
+    @pytest.mark.unit
+    def test_int(self):
+        vv, mm, cc, k, tr = 
ConstraintTermAnalysisVisitor().walk_expression(expr=7) + + assert vv == [7] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_float(self): + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=7.7) + + assert vv == [7.7] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_scalar_param(self): + m = ConcreteModel() + m.scalar_param = Param(initialize=1, mutable=True) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.scalar_param + ) + + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_indexed_param(self): + m = ConcreteModel() + m.indexed_param = Param(["a", "b"], initialize=1, mutable=True) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.indexed_param["a"] + ) + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_scalar_var(self): + m = ConcreteModel() + m.scalar_var = Var(initialize=1) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.scalar_var + ) + + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_indexed_var(self): + m = ConcreteModel() + m.indexed_var = Var(["a", "b"], initialize=1) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.indexed_var["a"] + ) + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_scalar_var_fixed(self): + m = ConcreteModel() + m.scalar_var = Var(initialize=1) + m.scalar_var.fix() + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.scalar_var + ) + + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_indexed_var_fixed(self): + m = ConcreteModel() + m.indexed_var = Var(["a", "b"], initialize=1) + m.indexed_var.fix() + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.indexed_var["a"] + ) + assert vv == [1] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_equality_expr(self): + m = ConcreteModel() + m.v1 = Var(initialize=1e-7) + m.v2 = Var(initialize=1e7) + + expr = m.v1 == m.v2 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1e-7, 1e7] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_equality_expr_constant(self): + m = ConcreteModel() + m.v1 = Var(initialize=1e-7) + m.v2 = Var(initialize=1e7) + + # Fix v1, not constant yet as v2 still free + m.v1.fix() + + expr = m.v1 == m.v2 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1e-7, 1e7] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + # Fix v2, now constant + m.v2.fix() + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1e-7, 1e7] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_sum_expr(self): + m = ConcreteModel() + m.v1 = Var(["a", "b", "c"], initialize=1e7) + m.v1["a"].set_value(1e-7) + + expr = sum(m.v1[i] for i in m.v1) + vv, mm, cc, k, tr = 
ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [1e-7, 1e7, 1e7]
+        assert expr in mm
+        assert len(mm) == 1
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_sum_expr_constant(self):
+        m = ConcreteModel()
+        m.v1 = Var(["a", "b", "c"], initialize=1e7)
+        m.v1["a"].set_value(1e-7)
+        m.v1.fix()
+
+        expr = sum(m.v1[i] for i in m.v1)
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [1e-7, 1e7, 1e7]
+        assert expr in mm
+        assert len(mm) == 1
+        assert len(cc) == 0
+        assert k
+        assert not tr
+
+    @pytest.fixture(scope="class")
+    def model(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=2)
+        m.v2 = Var(initialize=3)
+        m.v3 = Var(initialize=5.0000001)
+        m.v4 = Var(initialize=5)
+
+        return m
+
+    @pytest.mark.unit
+    def test_product_expr(self, model):
+        expr = model.v1 * model.v2 * model.v3
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [pytest.approx(30.0000006, rel=1e-8)]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_product_sum_expr(self, model):
+        expr = (model.v1 + model.v2) * (model.v3 + model.v4)
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [pytest.approx((2 + 3) * (5.0000001 + 5), rel=1e-8)]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_product_sum_expr_w_negation(self, model):
+        expr = (model.v1 + model.v2) * (model.v3 - model.v4)
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [pytest.approx(0.0000005, rel=1e-8)]
+        assert len(mm) == 0
+        assert expr in cc
+        assert len(cc) == 1
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_division_expr(self, model):
+        expr = model.v1 / model.v2 / model.v3
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [pytest.approx(2 / 3 / 5.0000001, rel=1e-8)]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_division_sum_expr(self, model):
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(
+            expr=(model.v1 + model.v2) / (model.v3 + model.v4)
+        )
+
+        assert vv == [
+            pytest.approx(2 / (5.0000001 + 5), rel=1e-8),
+            pytest.approx(3 / (5.0000001 + 5), rel=1e-8),
+        ]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_division_sum_expr_w_negation(self, model):
+        expr = (model.v1 - model.v2) / (model.v3 - model.v4)
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [
+            pytest.approx(2 / 0.0000001, rel=1e-8),
+            pytest.approx(-3 / 0.0000001, rel=1e-8),
+        ]
+        assert len(mm) == 0
+        # The check for cancellation should be deferred
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_division_sum_expr_w_zero_denominator(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=2)
+        m.v2 = Var(initialize=3)
+        m.v3 = Var(initialize=5)
+        m.v4 = Var(initialize=5)
+
+        expr = (m.v1 - m.v2) / (m.v3 - m.v4)
+        with pytest.raises(
+            ZeroDivisionError,
+            match=re.escape(
+                "Error in ConstraintTermAnalysisVisitor: found division with denominator of 0 "
+                "((v1 - v2)/(v3 - v4))."
+ ), + ): + ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + @pytest.mark.unit + def test_division_sum_expr_w_negation_deferred(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=2) + m.v3 = Var(initialize=5.0000001) + m.v4 = Var(initialize=5) + + expr = ((m.v1 - m.v2) / (m.v3 - m.v4)) ** 2 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [pytest.approx(0, abs=1e-8)] + assert len(mm) == 0 + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + @pytest.mark.unit + def test_division_expr_error(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=0) + + with pytest.raises( + ZeroDivisionError, + match=re.escape( + "Error in ConstraintTermAnalysisVisitor: found division with " + "denominator of 0 (v1/v2)." + ), + ): + ConstraintTermAnalysisVisitor().walk_expression(expr=m.v1 / m.v2) + + @pytest.mark.unit + def test_pow_expr(self, model): + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=model.v1**model.v2 + ) + + assert vv == [8] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_pow_sum_expr(self, model): + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=(model.v1 + model.v2) ** (model.v3 + model.v4) + ) + + assert vv == [pytest.approx(5**10.0000001, rel=1e-8)] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_pow_sum_expr_w_negation(self, model): + expr = (model.v1 + model.v2) ** (model.v3 - model.v4) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [pytest.approx((2 + 3) ** (0.0000001), rel=1e-8)] + assert len(mm) == 0 + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + @pytest.fixture(scope="class") + def func_model(self): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=0.99999) + m.v3 = Var(initialize=-100) + + m.v2.fix() + + return m + + @pytest.mark.unit + def test_negation_expr(self, func_model): + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=-func_model.v1 + ) + + assert vv == [-1] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_negation_sum_expr(self, func_model): + expr = -(func_model.v1 - func_model.v2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + # Checking the cancellation should be deferred + # Expect to get two values back + assert vv == [pytest.approx(-1, rel=1e-8), pytest.approx(0.99999, rel=1e-8)] + assert len(mm) == 0 + # Check is deferred, so no cancellations identified + assert len(cc) == 0 + assert not k + assert not tr + + # acosh has bounds that don't fit with other functions - we will assume we caught enough + func_list = [ + exp, + log, + log10, + sqrt, + sin, + cos, + tan, + asin, + acos, + atan, + sinh, + cosh, + tanh, + asinh, + atanh, + ] + func_error_list = [log, log10, sqrt, asin, acos, acosh, atanh] + + @pytest.mark.unit + @pytest.mark.parametrize("func", func_list) + def test_func_expr(self, func_model, func): + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=func(func_model.v2) + ) + + assert vv == [pytest.approx(value(func(0.99999)), rel=1e-8)] + assert len(mm) == 0 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + @pytest.mark.parametrize("func", func_list) + def 
test_func_sum_expr(self, func_model, func): + expr = func(func_model.v1 - func_model.v2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [pytest.approx(value(func(0.00001)), rel=1e-8)] + assert len(mm) == 0 + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + @pytest.mark.unit + @pytest.mark.parametrize("func", func_list) + def test_func_sum_expr_w_negation(self, func_model, func): + expr = func(func_model.v1 - func_model.v2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [pytest.approx(value(func(0.00001)), rel=1e-8)] + assert len(mm) == 0 + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + @pytest.mark.unit + @pytest.mark.parametrize("func", func_error_list) + def test_func_expr_error(self, func_model, func): + with pytest.raises( + ValueError, + match=re.escape( + "Error in ConstraintTermAnalysisVisitor: error evaluating " + ), + ): + ConstraintTermAnalysisVisitor().walk_expression(expr=func(func_model.v3)) + + @pytest.mark.unit + def test_expr_if(self, model): + model.exprif = Expr_if( + IF=model.v1, + THEN=model.v1 + model.v2, + ELSE=model.v3 - model.v4, + ) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=model.exprif + ) + + assert vv == [pytest.approx(5, rel=1e-8)] + assert len(mm) == 0 + assert model.exprif in cc + assert len(cc) == 1 + assert not k + assert not tr + + @pytest.mark.unit + @pytest.mark.skipif( + not AmplInterface.available(), reason="pynumero_ASL is not available" + ) + @pytest.mark.skipif(not cubic_roots_available, reason="Cubic roots not available") + def test_ext_func(self): + # Use the cubic root external function to test + m = ConcreteModel() + m.a = Var(initialize=1) + m.b = Var(initialize=1) + + m.expr_write = CubicThermoExpressions(m) + Z = m.expr_write.z_liq(eos=CubicEoS.PR, A=m.a, B=m.b) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=Z) + + assert vv == [pytest.approx(-2.1149075414767577, rel=1e-8)] + assert len(mm) == 0 + assert Z in cc + assert len(cc) == 1 + assert not k + assert not tr + + # Check that model state did not change + assert value(m.a) == 1 + assert value(m.b) == 1 + assert value(Z) == pytest.approx(-2.1149075414767577, rel=1e-8) + + @pytest.mark.unit + def test_equality_sum_expr(self): + m = ConcreteModel() + m.v1 = Var(["a", "b", "c"], initialize=1e7) + m.v2 = Var(initialize=1e-7) + + expr = m.v2 == sum(m.v1[i] for i in m.v1) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1e-7, 3e7] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_inequality_sum_expr(self): + m = ConcreteModel() + m.v1 = Var(["a", "b", "c"], initialize=1e7) + m.v2 = Var(initialize=1e-7) + + expr = m.v2 <= sum(m.v1[i] for i in m.v1) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1e-7, 3e7] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_compound_equality_expr_1(self): + m = ConcreteModel() + m.v1 = Var(["a", "b", "c"], initialize=1e7) + m.v2 = Var(initialize=1e-7) + + expr = 6 * m.v2 == 8 * sum(m.v1[i] for i in m.v1) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-6e-7, 2.4e8] + assert expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not 
k + assert not tr + + @pytest.mark.unit + def test_ranged_expr(self): + m = ConcreteModel() + m.v1 = Var(initialize=1e7) + m.v2 = Var(initialize=1e-7) + m.v3 = Var(initialize=1e7) + + m.expr = EXPR.RangedExpression(args=(m.v1, m.v2, m.v3), strict=True) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=m.expr) + + assert vv == [-1e7, 1e-7, -1e7] + assert m.expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + # Fix v1 and v2 to make first two terms constant + m.v1.fix() + m.v2.fix() + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=m.expr) + + # Should not be flagged as constant due to v3 + assert vv == [-1e7, 1e-7, -1e7] + assert m.expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert not k + assert not tr + + # Fix v3 to make all terms constant + m.v3.fix() + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=m.expr) + + # Should now be constant + assert vv == [-1e7, 1e-7, -1e7] + assert m.expr in mm + assert len(mm) == 1 + assert len(cc) == 0 + assert k + assert not tr + + @pytest.mark.unit + def test_compound_equality_expr_2(self): + m = ConcreteModel() + m.v1 = Var(["a", "b", "c"], initialize=1e7) + m.v2 = Var(initialize=1e-7) + m.v3 = Var(initialize=1e3) + + # Set this small so we get two mismatched warnings + m.v1["a"].set_value(1e-7) + + expr1 = sum(m.v1[i] for i in m.v1) + expr = 6 * m.v2 == 8 * expr1 + m.v3 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-6e-7, pytest.approx(8 * (2e7 + 1e-7) + 1000, rel=1e-8)] + assert expr in mm + assert expr1 in mm + assert len(mm) == 2 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_canceling_sum_expr(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.v1 - m.v2 + ) + + assert vv == [2, -2] + assert len(mm) == 0 + # We do not check cancellation at the sum, so this should be empty + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_expr_w_canceling_sum(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=2) + m.v3 = Var(initialize=3) + + expr = m.v3 * (m.v1 - m.v2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [6, -6] + assert len(mm) == 0 + # Check for cancellation should be deferred + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_expr_w_deferred_canceling_sum(self): + m = ConcreteModel() + m.v1 = Var(initialize=2) + m.v2 = Var(initialize=2) + m.v3 = Var(initialize=3) + + expr = (m.v3 * (m.v1 - m.v2)) ** 2 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [0] + assert len(mm) == 0 + # We should get a warning about canceling sums here + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + # Check for tolerance of sum cancellation + m.v2.set_value(2.00022) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor( + term_cancellation_tolerance=1e-4 + ).walk_expression(expr=expr) + + assert vv == [pytest.approx((3 * -0.00022) ** 2, rel=1e-8)] + assert len(mm) == 0 + # This should pass as the difference is greater than tol + assert len(cc) == 0 + assert not k + assert not tr + + # Change tolerance so it should identify cancellation + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor( + term_cancellation_tolerance=1e-3 + 
).walk_expression(expr=expr)
+
+        assert vv == [pytest.approx((3 * -0.00022) ** 2, rel=1e-8)]
+        assert len(mm) == 0
+        assert expr in cc
+        assert len(cc) == 1
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_canceling_equality_expr_safe(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=1e7)
+        m.v2 = Var(initialize=1e7)
+
+        # This is a standard constraint form, so should have no issues despite cancellation
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(
+            expr=0 == m.v1 - m.v2
+        )
+
+        assert vv == [0, pytest.approx(0, abs=1e-12)]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_canceling_equality_expr_zero_term(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=1e7)
+        m.v2 = Var(initialize=1e7)
+        m.v3 = Var(initialize=0)
+
+        expr = m.v3 == m.v1 - m.v2
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [0, pytest.approx(0, abs=1e-12)]
+        assert len(mm) == 0
+        # No canceling terms as v3=0 is ignored, thus we have the a=b form
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+        # Set v3 above zero tolerance
+        m.v3.set_value(1e-4)
+
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+        assert vv == [-1e-4, pytest.approx(0, abs=1e-12)]
+        assert len(mm) == 0
+        assert expr in cc
+        assert len(cc) == 1
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_canceling_equality_expr_compound(self):
+        m = ConcreteModel()
+        m.v1 = Var(["a", "b"], initialize=5e6)
+        m.v2 = Var(initialize=1e7)
+        m.v3 = Var(initialize=0)
+
+        expr = m.v3 == sum(v for v in m.v1.values()) - m.v2
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [0, pytest.approx(0, abs=1e-12)]
+        assert len(mm) == 0
+        # No canceling terms as v3=0 is ignored, thus we have the a=b form
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+        # Set v3 above zero tolerance
+        m.v3.set_value(1e-4)
+
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+        assert vv == [-1e-4, pytest.approx(0, abs=1e-12)]
+        assert len(mm) == 0
+        assert expr in cc
+        assert len(cc) == 1
+        assert not k
+        assert not tr
+
+    # Double check that the a+eps=c form gets flagged in some way
+    @pytest.mark.unit
+    def test_canceling_equality_expr_canceling_sides(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=1)
+        m.v2 = Var(initialize=1)
+        m.v3 = Var(initialize=1e-8)
+
+        expr1 = m.v1 + m.v3
+        expr = m.v2 == expr1
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [-1, pytest.approx(1 + 1e-8, abs=1e-8)]
+        assert expr1 in mm
+        assert len(mm) == 1
+        assert expr in cc
+        assert len(cc) == 1
+        assert not k
+        assert not tr
+
+    # Check to make sure simple linking constraints are not flagged as canceling
+    @pytest.mark.unit
+    def test_linking_equality_expr(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=1)
+        m.v2 = Var(initialize=1)
+
+        expr = m.v1 == m.v2
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [-1, 1]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert not k
+        assert not tr
+
+    @pytest.mark.unit
+    def test_linking_equality_expr_compound(self):
+        m = ConcreteModel()
+        m.v1 = Var(initialize=1)
+        m.v2 = Var(initialize=1)
+        m.v3 = Var(initialize=1)
+
+        expr = m.v1 == m.v2 * m.v3
+        vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr)
+
+        assert vv == [-1, 1]
+        assert len(mm) == 0
+        assert len(cc) == 0
+        assert 
not k + assert not tr + + @pytest.mark.component + def test_external_function_w_string_argument(self): + m = ConcreteModel() + m.properties = iapws95.Iapws95ParameterBlock() + m.state = m.properties.build_state_block([0]) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.state[0].enth_mol + ) + + assert vv == [pytest.approx(1.1021387e-2, rel=1e-6)] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + # Test nested external functions + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression( + expr=m.state[0].temperature + ) + + assert vv == [pytest.approx(270.4877, rel=1e-6)] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_zero_tolerance(self): + m = ConcreteModel() + m.v1 = Var(initialize=1e-12) + m.v2 = Var(initialize=1) + m.v3 = Var(initialize=1e-12) + + expr1 = m.v1 - m.v3 + expr2 = expr1 + 1 + expr = m.v2 == expr2 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1, pytest.approx(1, abs=1e-8)] + # We expect no mismatches, as smallest terms are below zero tolerance + assert len(mm) == 0 + # We expect no canceling terms, as v1 and v3 are ignored (zero tolerance), leaving + # 1 == 1 + assert len(cc) == 0 + assert not k + assert not tr + + # Set v1 and v3 above zero tolerance + m.v1.set_value(1e-8) + m.v3.set_value(1e-8) + + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [-1, pytest.approx(1, abs=1e-8)] + assert expr2 in mm + assert len(mm) == 1 + assert expr in cc + assert len(cc) == 1 + assert not k + assert not tr + + # Test to make sure scaled constraints are not flagged as issues + @pytest.mark.unit + def test_scaled_equality_expr_product(self): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=1) + m.v3 = Var(initialize=2) + + expr = 0 == m.v3 * (m.v1 - m.v2) + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [0, 0] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_scaled_equality_expr_division(self): + m = ConcreteModel() + m.v1 = Var(initialize=1) + m.v2 = Var(initialize=1) + m.v3 = Var(initialize=2) + + expr = 0 == (m.v1 - m.v2) / m.v3 + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [0, 0] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_mole_fraction_constraint(self): + m = ConcreteModel() + m.mole_frac_a = Var(initialize=0.5) + m.flow_a = Var(initialize=100) + m.flow_b = Var(initialize=100) + + expr = m.mole_frac_a * (m.flow_a + m.flow_b) == m.flow_a + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [pytest.approx(-100, rel=1e-5), pytest.approx(100, rel=1e-5)] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr + + @pytest.mark.unit + def test_mole_fraction_constraint_trace(self): + m = ConcreteModel() + m.mole_frac_a = Var(initialize=0.999810) + m.flow_a = Var(initialize=122.746) + m.flow_b = Var(initialize=0.0233239) + + expr = m.mole_frac_a * (m.flow_a + m.flow_b) == m.flow_a + vv, mm, cc, k, tr = ConstraintTermAnalysisVisitor().walk_expression(expr=expr) + + assert vv == [ + pytest.approx(-122.746, rel=1e-5), + pytest.approx(122.746, rel=1e-5), + ] + assert len(mm) == 0 + assert len(cc) == 0 + assert not k + assert not tr diff --git 
a/idaes/core/util/utility_minimization.py b/idaes/core/util/utility_minimization.py index 8ea9e35bd8..34ce574b27 100644 --- a/idaes/core/util/utility_minimization.py +++ b/idaes/core/util/utility_minimization.py @@ -28,7 +28,7 @@ __author__ = "Alejandro Garciadiego, Alexander Dowling" -# Temperature Epsilon to add or subsract 1 degree to avoid division over +# Temperature Epsilon to add or subtract 1 degree to avoid division over # 0 in equipment not changing temperature EpsT = 1 @@ -301,7 +301,7 @@ def heat_data(blk, heating, cooling, DG_units=pyunits.Mwatt): Q[i] = value(pyunits.convert(v.heat_duty[0], to_units=DG_units)) FCp_[i] = Q[i] / (T_out[i] - T_in[i]) - # Generate a large dictioary containing all the data obtained + # Generate a large dictionary containing all the data obtained # from the equipment exchangeData = {} for i in pinch_streamsdict: @@ -413,7 +413,7 @@ def pinch_calc(heating, cooling, exchangeData, DTmin, eps): initQw = -sum(value(exchangeData[i]["Q"]) for i in exchangerdict) + initQs initQw = max([initQw, 0.0]) - # Fill Class with all the data to initialioze Duran-Grossmann variables + # Fill Class with all the data to initialize Duran-Grossmann variables PD = PinchDataClass(initQs, initQw) PD.initQAh = initQAh PD.initQAc = initQAc diff --git a/idaes/models/properties/modular_properties/base/generic_property.py b/idaes/models/properties/modular_properties/base/generic_property.py index 26a31b3d57..2dcb428bea 100644 --- a/idaes/models/properties/modular_properties/base/generic_property.py +++ b/idaes/models/properties/modular_properties/base/generic_property.py @@ -304,7 +304,8 @@ def build(self): # Add Phase objects if self.config.phases is None: raise ConfigurationError( - "{} was not provided with a phases argument.".format(self.name) + f"{self.name} was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?" ) # Add a flag indicating whether this is an electrolyte system or not diff --git a/idaes/models/properties/modular_properties/base/generic_reaction.py b/idaes/models/properties/modular_properties/base/generic_reaction.py index 531a7bd06d..6ad5644c3a 100644 --- a/idaes/models/properties/modular_properties/base/generic_reaction.py +++ b/idaes/models/properties/modular_properties/base/generic_reaction.py @@ -199,6 +199,13 @@ def build(self): # and cannot be set until the config block is created by super.build super(ReactionParameterBlock, self).build() + # Check to make sure a property block was assigned + if self.config.property_package is None: + raise ConfigurationError( + f"{self.name} was not assigned a property package. " + "Did you forget to unpack the configuration dictionary?" 
+ ) + # Set base units of measurement self.get_metadata().add_default_units(self.config.base_units) diff --git a/idaes/models/properties/modular_properties/base/tests/test_generic_property.py b/idaes/models/properties/modular_properties/base/tests/test_generic_property.py index 716c824443..39a8e84e13 100644 --- a/idaes/models/properties/modular_properties/base/tests/test_generic_property.py +++ b/idaes/models/properties/modular_properties/base/tests/test_generic_property.py @@ -199,7 +199,7 @@ def test_no_components(self): with pytest.raises( ConfigurationError, - match="params was not provided with a components " "argument.", + match="params was not provided with a components argument.", ): m.params = DummyParameterBlock( phases={ @@ -215,12 +215,33 @@ def test_no_phases(self): with pytest.raises( ConfigurationError, - match="params was not provided with a phases " "argument.", + match="params was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?", ): m.params = DummyParameterBlock( components={"a": {}, "b": {}, "c": {}}, base_units=base_units ) + @pytest.mark.unit + def test_packed_dict(self): + m = ConcreteModel() + + dummy_dict = { + "phases": { + "p1": {"equation_of_state": "foo"}, + "p2": {"equation_of_state": "bar"}, + }, + } + + with pytest.raises( + ConfigurationError, + match=re.escape( + "params[phases] was not provided with a phases argument. " + "Did you forget to unpack the configurations dictionary?" + ), + ): + m.params = DummyParameterBlock(dummy_dict) + @pytest.mark.unit def test_invalid_component_in_phase_component_list(self): m = ConcreteModel() diff --git a/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py b/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py index 39f81c1921..722837906d 100644 --- a/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py +++ b/idaes/models/properties/modular_properties/base/tests/test_generic_reaction.py @@ -193,6 +193,17 @@ def test_rate_build_no_stoichiometry(self, m): rate_reactions={"r1": {"heat_of_reaction": "foo", "rate_form": "foo"}}, ) + @pytest.mark.unit + def test_packed_config_dict(self, m): + with pytest.raises( + ConfigurationError, + match=re.escape( + "rxn_params[property_package] was not assigned a property package. " + "Did you forget to unpack the configuration dictionary?" + ), + ): + m.rxn_params = GenericReactionParameterBlock({"property_package": m.params}) + @pytest.mark.unit def test_rate_build_invalid_phase_stoichiometry(self, m): with pytest.raises( diff --git a/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py b/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py index cd08ffbb67..4b305a1630 100644 --- a/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py +++ b/idaes/models_extra/column_models/tests/test_plate_heat_exchanger.py @@ -14,8 +14,8 @@ Tests for Plate Heat Exchnager unit model. 
Author: Akula Paul """ - import pytest + from pyomo.environ import ( check_optimal_termination, ConcreteModel, @@ -23,6 +23,8 @@ units as pyunits, value, ) +from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent + from idaes.core import FlowsheetBlock from idaes.models_extra.column_models.plate_heat_exchanger import ( PlateHeatExchanger as PHE, @@ -37,7 +39,6 @@ from idaes.core.util.model_statistics import degrees_of_freedom from idaes.core.util.testing import initialization_tester from idaes.core.solvers import get_solver -from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent # ----------------------------------------------------------------------------- @@ -66,8 +67,9 @@ def test_config(): workaround_for_1294 = pytest.mark.xfail( + # the failures only occur for Windows on GHA with Python <3.12, and Linux with Python 3.12 reason="These tests fail with Pyomo 6.7.0. See IDAES/idaes-pse#1294 for details", - strict=False, # the failures only occur for certain platforms, e.g. Windows on GHA + strict=False, ) diff --git a/setup.py b/setup.py index 407a027159..19868f5d1d 100644 --- a/setup.py +++ b/setup.py @@ -44,6 +44,7 @@ class ExtraDependencies: omlt = [ "omlt==1.1", # fix the version for now as package evolves "tensorflow", + "onnx", ] grid = [ "gridx-prescient>=2.2.1", # idaes.tests.prescient @@ -81,7 +82,7 @@ def __getitem__(self, key): # Put abstract (non-versioned) deps here. # Concrete dependencies go in requirements[-dev].txt install_requires=[ - "pyomo >= 6.8.0", + "pyomo @ https://github.com/IDAES/pyomo/archive/6.8.0.idaes.2024.10.25.zip", "pint >= 0.24.1", # required to use Pyomo units. Pint 0.24.1 needed for Python 3.9 support "networkx", # required to use Pyomo network "numpy>=1,<3", @@ -124,6 +125,7 @@ def __getitem__(self, key): "*.trc", "*.nl", "*.keras", # idaes/core/surrogate/tests/data/keras_models + "*.onnx", ] }, include_package_data=True,