diff --git a/pyproject.toml b/pyproject.toml index abad27f0..ea29be34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,10 +73,13 @@ include = ["physrisk*"] [tool.pdm.dev-dependencies] test = [ - "pdm[pytest]", - "pytest", - "pytest-cov", - "sphinx-pyproject" + "pdm[pytest]", + "pytest", + "pytest-cov", + "sphinx-pyproject", + "pandas>=2.0.3", + "dependency-injector>=4.41.0", + "geopandas<1.0,>=0.13.2", ] dev = [ "mypy", diff --git a/tests/api/container_test.py b/tests/api/container_test.py index c0875190..de4fa34d 100644 --- a/tests/api/container_test.py +++ b/tests/api/container_test.py @@ -2,7 +2,6 @@ from dependency_injector import containers, providers from physrisk.data.inventory_reader import InventoryReader - from ..data.hazard_model_store_test import mock_hazard_model_store_heat diff --git a/tests/api/data_requests_test.py b/tests/api/data_requests_test.py index 6076751a..1f9e147c 100644 --- a/tests/api/data_requests_test.py +++ b/tests/api/data_requests_test.py @@ -1,19 +1,18 @@ -import unittest +import pytest import numpy as np -import numpy.testing -from physrisk import requests -from physrisk.container import Container +from physrisk.hazard_models import core_hazards from physrisk.data.hazard_data_provider import HazardDataHint from physrisk.data.inventory import EmbeddedInventory from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.data.zarr_reader import ZarrReader from physrisk.hazard_models.core_hazards import get_default_source_paths from physrisk.kernel.hazards import ChronicHeat, RiverineInundation +from physrisk.container import Container +from physrisk import requests from ..api.container_test import TestContainer -from ..base_test import TestWithCredentials from ..data.hazard_model_store_test import ( TestData, get_mock_hazard_model_store_single_curve, @@ -21,215 +20,185 @@ ) -class TestDataRequests(TestWithCredentials): - def setUp(self): - super().setUp() - - def tearDown(self): - super().tearDown() - - 
def test_hazard_data_availability(self): - # test that validation passes: - container = Container() - container.override(TestContainer()) - requester = container.requester() - _ = requester.get(request_id="get_hazard_data_availability", request_dict={}) - - @unittest.skip("requires mocking.") - def test_hazard_data_description(self): - # test that validation passes: - container = Container() - requester = container.requester - _ = requester.get( - request_id="get_hazard_data_description", - request_dict={"paths": ["test_path.md"]}, - ) - - def test_generic_source_path(self): - inventory = EmbeddedInventory() - source_paths = get_default_source_paths(inventory) - result_heat = source_paths[ChronicHeat]( - indicator_id="mean_degree_days/above/32c", scenario="rcp8p5", year=2050 - ) - result_flood = source_paths[RiverineInundation]( - indicator_id="flood_depth", scenario="rcp8p5", year=2050 - ) - result_flood_hist = source_paths[RiverineInundation]( - indicator_id="flood_depth", scenario="historical", year=2080 - ) - result_heat_hint = source_paths[ChronicHeat]( - indicator_id="mean_degree_days/above/32c", - scenario="rcp8p5", - year=2050, - hint=HazardDataHint( - path="chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_{scenario}_{year}" - ), - ) - - assert ( - result_heat - == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_ACCESS-CM2_rcp8p5_2050" - ) - assert result_flood == "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050" - assert ( - result_flood_hist - == "inundation/wri/v2/inunriver_historical_000000000WATCH_1980" - ) - assert ( - result_heat_hint - == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_rcp8p5_2050" - ) - - def test_zarr_reading(self): - request_dict = { - "items": [ - { - "request_item_id": "test_inundation", - "event_type": "RiverineInundation", - "longitudes": TestData.longitudes[ - 0:3 - ], # coords['longitudes'][0:100], - "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], - "year": 2080, - 
"scenario": "rcp8p5", - "indicator_id": "flood_depth", - "indicator_model_gcm": "MIROC-ESM-CHEM", - } - ], - } - # validate request - request = requests.HazardDataRequest(**request_dict) # type: ignore - - store = get_mock_hazard_model_store_single_curve() - - result = requests._get_hazard_data( - request, - ZarrHazardModel( - source_paths=get_default_source_paths(EmbeddedInventory()), - reader=ZarrReader(store=store), - ), - ) - - numpy.testing.assert_array_almost_equal_nulp( - result.items[0].intensity_curve_set[0].intensities, np.zeros((9)) - ) - numpy.testing.assert_array_almost_equal_nulp( - result.items[0].intensity_curve_set[1].intensities, - np.linspace(0.1, 1.0, 9, dtype="f4"), - ) - numpy.testing.assert_array_almost_equal_nulp( - result.items[0].intensity_curve_set[2].intensities, np.zeros((9)) - ) - - def test_zarr_reading_chronic(self): - request_dict = { - "group_ids": ["osc"], - "items": [ - { - "request_item_id": "test_inundation", - "event_type": "ChronicHeat", - "longitudes": TestData.longitudes[ - 0:3 - ], # coords['longitudes'][0:100], - "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], - "year": 2050, - "scenario": "ssp585", - "indicator_id": "mean_degree_days/above/32c", - } - ], - } - # validate request - request = requests.HazardDataRequest(**request_dict) # type: ignore - - store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) - - source_paths = get_default_source_paths(EmbeddedInventory()) - result = requests._get_hazard_data( - request, - ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), - ) - numpy.testing.assert_array_almost_equal_nulp( - result.items[0].intensity_curve_set[0].intensities[0], 600.0 - ) - - # request_with_hint = request.copy() - # request_with_hint.items[0].path = "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_rcp8p5_2050" - # result = requests._get_hazard_data( - # request_with_hint, ZarrHazardModel(source_paths=source_paths, 
reader=ZarrReader(store)) - # ) - - @unittest.skip("requires OSC environment variables set") - def test_zarr_reading_live(self): - # needs valid OSC_S3_BUCKET, OSC_S3_ACCESS_KEY, OSC_S3_SECRET_KEY - container = Container() - requester = container.requester() - - import json - from zipfile import ZipFile - - with ZipFile("src/test/api/test_lat_lons.json.zip") as z: - with z.open("test_lat_lons.json") as f: - data = json.loads(f.read()) - - request1 = { - "items": [ - { - "request_item_id": "test_inundation", - "event_type": "ChronicHeat", - "longitudes": TestData.longitudes, - "latitudes": TestData.latitudes, - "year": 2030, - "scenario": "ssp585", - "indicator_id": "mean_work_loss/high", - } - ], - } - - request1 = { - "items": [ - { - "request_item_id": "test_inundation", - "event_type": "ChronicHeat", - "longitudes": data["longitudes"], - "latitudes": data["latitudes"], - "year": 2030, - "scenario": "ssp585", - "indicator_id": "mean_work_loss/high", - } - ], - } - - response_floor = requester.get( - request_id="get_hazard_data", request_dict=request1 - ) - request1["interpolation"] = "linear" # type: ignore - response_linear = requester.get( - request_id="get_hazard_data", request_dict=request1 - ) - print(response_linear) - - floor = json.loads(response_floor)["items"][0]["intensity_curve_set"][5][ - "intensities" - ] - linear = json.loads(response_linear)["items"][0]["intensity_curve_set"][5][ - "intensities" - ] - - print(floor) - print(linear) - - request2 = { - "items": [ - { - "request_item_id": "test_inundation", - "event_type": "CoastalInundation", - "longitudes": TestData.coastal_longitudes, - "latitudes": TestData.coastal_latitudes, - "year": 2080, - "scenario": "rcp8p5", - "model": "wtsub/95", - } - ], - } - response = requester.get(request_type="get_hazard_data", request_dict=request2) - print(response) +def test_hazard_data_availability(): + # test that validation passes: + container = Container() + container.override(TestContainer()) + requester = 
container.requester() + _ = requester.get(request_id="get_hazard_data_availability", request_dict={}) + + +@pytest.mark.skip(reason="requires mocking.") +def test_hazard_data_description(): + # test that validation passes: + container = Container() + requester = container.requester + _ = requester.get( + request_id="get_hazard_data_description", + request_dict={"paths": ["test_path.md"]}, + ) + + +def test_generic_source_path(): + inventory = EmbeddedInventory() + source_paths = core_hazards.get_default_source_paths(inventory) + result_heat = source_paths[ChronicHeat]( + indicator_id="mean_degree_days/above/32c", scenario="rcp8p5", year=2050 + ) + result_flood = source_paths[RiverineInundation]( + indicator_id="flood_depth", scenario="rcp8p5", year=2050 + ) + result_flood_hist = source_paths[RiverineInundation]( + indicator_id="flood_depth", scenario="historical", year=2080 + ) + result_heat_hint = source_paths[ChronicHeat]( + indicator_id="mean_degree_days/above/32c", + scenario="rcp8p5", + year=2050, + hint=HazardDataHint( + path="chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_{scenario}_{year}" + ), + ) + + assert ( + result_heat + == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_ACCESS-CM2_rcp8p5_2050" + ) + assert result_flood == "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050" + assert ( + result_flood_hist + == "inundation/wri/v2/inunriver_historical_000000000WATCH_1980" + ) + assert ( + result_heat_hint + == "chronic_heat/osc/v2/mean_degree_days_v2_above_32c_CMCC-ESM2_rcp8p5_2050" + ) + + +def test_zarr_reading(): + request_dict = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "RiverineInundation", + "longitudes": TestData.longitudes[0:3], # coords['longitudes'][0:100], + "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], + "year": 2080, + "scenario": "rcp8p5", + "indicator_id": "flood_depth", + "indicator_model_gcm": "MIROC-ESM-CHEM", + } + ], + } + # validate request + request = 
requests.HazardDataRequest(**request_dict) # type: ignore + + store = get_mock_hazard_model_store_single_curve() + + result = requests._get_hazard_data( + request, + ZarrHazardModel( + source_paths=get_default_source_paths(EmbeddedInventory()), + reader=ZarrReader(store=store), + ), + ) + + np.testing.assert_array_almost_equal_nulp( + result.items[0].intensity_curve_set[0].intensities, np.zeros((9)) + ) + np.testing.assert_array_almost_equal_nulp( + result.items[0].intensity_curve_set[1].intensities, + np.linspace(0.1, 1.0, 9, dtype="f4"), + ) + np.testing.assert_array_almost_equal_nulp( + result.items[0].intensity_curve_set[2].intensities, np.zeros((9)) + ) + + +def test_zarr_reading_chronic(): + request_dict = { + "group_ids": ["osc"], + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "ChronicHeat", + "longitudes": TestData.longitudes[0:3], # coords['longitudes'][0:100], + "latitudes": TestData.latitudes[0:3], # coords['latitudes'][0:100], + "year": 2050, + "scenario": "ssp585", + "indicator_id": "mean_degree_days/above/32c", + } + ], + } + # validate request + request = requests.HazardDataRequest(**request_dict) # type: ignore + + store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) + + source_paths = get_default_source_paths(EmbeddedInventory()) + result = requests._get_hazard_data( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + ) + np.testing.assert_array_almost_equal_nulp( + result.items[0].intensity_curve_set[0].intensities[0], 600.0 + ) + + +def test_zarr_reading_live(load_credentials): + # needs valid OSC_S3_BUCKET, OSC_S3_ACCESS_KEY, OSC_S3_SECRET_KEY + container = Container() + requester = container.requester() + + import json + from zipfile import ZipFile + + with ZipFile("./tests/api/test_lat_lons.json.zip") as z: + with z.open("test_lat_lons.json") as f: + data = json.loads(f.read()) + + request1 = { + "items": [ + { + "request_item_id": "test_inundation", + 
"event_type": "ChronicHeat", + "longitudes": data["longitudes"], + "latitudes": data["latitudes"], + "year": 2030, + "scenario": "ssp585", + "indicator_id": "mean_work_loss/high", + } + ], + } + + response_floor = requester.get(request_id="get_hazard_data", request_dict=request1) + request1["interpolation"] = "linear" # type: ignore + response_linear = requester.get(request_id="get_hazard_data", request_dict=request1) + print(response_linear) + + floor = json.loads(response_floor)["items"][0]["intensity_curve_set"][5][ + "intensities" + ] + linear = json.loads(response_linear)["items"][0]["intensity_curve_set"][5][ + "intensities" + ] + + print(floor) + print(linear) + + request2 = { + "items": [ + { + "request_item_id": "test_inundation", + "event_type": "CoastalInundation", + "longitudes": TestData.coastal_longitudes, + "latitudes": TestData.coastal_latitudes, + "year": 2080, + "scenario": "rcp8p5", + "indicator_id": "flood_depth", + "model_id": "wtsub/95", + } + ], + } + response = requester.get(request_id="get_hazard_data", request_dict=request2) + print(response) diff --git a/tests/api/housing_kaggle_spain.json.bz2 b/tests/api/housing_kaggle_spain.json.bz2 new file mode 100644 index 00000000..65d3cf75 Binary files /dev/null and b/tests/api/housing_kaggle_spain.json.bz2 differ diff --git a/tests/api/impact_requests_test.py b/tests/api/impact_requests_test.py index 3e0ad05f..2c9aca16 100644 --- a/tests/api/impact_requests_test.py +++ b/tests/api/impact_requests_test.py @@ -1,13 +1,11 @@ import json -import unittest - import numpy as np +import os from pydantic import TypeAdapter +import pytest -from physrisk import requests -from physrisk.api.v1.common import Asset, Assets +from physrisk.api.v1.common import Assets, Asset from physrisk.api.v1.impact_req_resp import RiskMeasures, RiskMeasuresHelper -from physrisk.container import Container from physrisk.data.inventory import EmbeddedInventory from physrisk.data.pregenerated_hazard_model import ZarrHazardModel 
from physrisk.data.zarr_reader import ZarrReader @@ -17,6 +15,8 @@ RealEstateAsset, ThermalPowerGeneratingAsset, ) + +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels from physrisk.vulnerability_models.power_generating_asset_models import InundationModel from physrisk.vulnerability_models.real_estate_models import ( @@ -30,8 +30,9 @@ ThermalPowerGenerationWaterStressModel, ThermalPowerGenerationWaterTemperatureModel, ) +from physrisk.container import Container +from physrisk import requests -from ..base_test import TestWithCredentials from ..data.hazard_model_store_test import ( TestData, add_curves, @@ -40,814 +41,802 @@ zarr_memory_store, ) -# from physrisk.api.v1.impact_req_resp import AssetImpactResponse -# from physrisk.data.static.world import get_countries_and_continents - - -class TestImpactRequests(TestWithCredentials): - def test_asset_list_json(self): - assets = { - "items": [ - { - "asset_class": "RealEstateAsset", - "type": "Buildings/Industrial", - "location": "Asia", - "longitude": 69.4787, - "latitude": 34.556, - }, - { - "asset_class": "PowerGeneratingAsset", - "type": "Nuclear", - "location": "Asia", - "longitude": -70.9157, - "latitude": -39.2145, - }, - ], - } - assets_obj = Assets(**assets) - self.assertIsNotNone(assets_obj) - - def test_extra_fields(self): - assets = { - "items": [ - { - "asset_class": "RealEstateAsset", - "type": "Buildings/Industrial", - "location": "Asia", - "longitude": 69.4787, - "latitude": 34.556, - "extra_field": 2.0, - "capacity": 1000.0, - } - ], - } - assets = requests.create_assets(Assets(**assets)) - # in the case of RealEstateAsset, extra fields are allowed, including those not in the Pydantic Asset object - assert assets[0].capacity == 1000.0 - assert assets[0].extra_field == 2.0 - - def test_impact_request(self): - """Runs short asset-level impact request.""" - - assets = { - "items": [ - { - 
"asset_class": "RealEstateAsset", - "type": "Buildings/Industrial", - "location": "Asia", - "longitude": TestData.longitudes[0], - "latitude": TestData.latitudes[0], - }, - { - "asset_class": "PowerGeneratingAsset", - "type": "Nuclear", - "location": "Asia", - "longitude": TestData.longitudes[1], - "latitude": TestData.latitudes[1], - }, - ], - } - - request_dict = { - "assets": assets, - "include_asset_level": True, - "include_measures": False, - "include_calc_details": True, - "years": [2080], - "scenarios": ["rcp8p5"], - } - - request = requests.AssetImpactRequest(**request_dict) # type: ignore - - curve = np.array( - [0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - source_paths = get_default_source_paths(EmbeddedInventory()) - vulnerability_models = DictBasedVulnerabilityModels( +def test_asset_list_json(): + assets = { + "items": [ { - PowerGeneratingAsset: [InundationModel()], - RealEstateAsset: [ - RealEstateCoastalInundationModel(), - RealEstateRiverineInundationModel(), - ], + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": 69.4787, + "latitude": 34.556, + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": -70.9157, + "latitude": -39.2145, + }, + ], + } + assets_obj = Assets(**assets) + assert assets_obj is not None + + +def test_extra_fields(): + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": 69.4787, + "latitude": 34.556, + "extra_field": 2.0, + "capacity": 1000.0, } - ) - - response = requests._get_asset_impacts( - request, - ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), - vulnerability_models=vulnerability_models, - ) - - self.assertEqual( - response.asset_impacts[0].impacts[0].hazard_type, "CoastalInundation" - ) 
- - def test_risk_model_impact_request(self): - """Tests the risk model functionality of the impact request.""" - - assets = { - "items": [ - { - "asset_class": "RealEstateAsset", - "type": "Buildings/Industrial", - "location": "Asia", - "longitude": TestData.longitudes[0], - "latitude": TestData.latitudes[0], - }, - { - "asset_class": "PowerGeneratingAsset", - "type": "Nuclear", - "location": "Asia", - "longitude": TestData.longitudes[1], - "latitude": TestData.latitudes[1], - }, + ], + } + assets = requests.create_assets(Assets(**assets)) + # in the case of RealEstateAsset, extra fields are allowed, including those not in the Pydantic Asset object + assert assets[0].capacity == 1000.0 + assert assets[0].extra_field == 2.0 + + +def test_impact_request(): + """Runs short asset-level impact request.""" + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": TestData.longitudes[0], + "latitude": TestData.latitudes[0], + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": TestData.longitudes[1], + "latitude": TestData.latitudes[1], + }, + ], + } + + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_measures": False, + "include_calc_details": True, + "years": [2080], + "scenarios": ["rcp8p5"], + } + + request = requests.AssetImpactRequest(**request_dict) # type: ignore + + curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163]) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + PowerGeneratingAsset: [InundationModel()], + RealEstateAsset: [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), ], } + ) - request_dict = { - "assets": assets, - "include_asset_level": True, - 
"include_measures": False, - "include_calc_details": True, - "years": [2080], - "scenarios": ["rcp8p5"], - } - - request = requests.AssetImpactRequest(**request_dict) # type: ignore - - curve = np.array( - [0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - - source_paths = get_default_source_paths(EmbeddedInventory()) - vulnerability_models = DictBasedVulnerabilityModels( - { - PowerGeneratingAsset: [InundationModel()], - RealEstateAsset: [ - RealEstateCoastalInundationModel(), - RealEstateRiverineInundationModel(), - ], - } - ) - - response = requests._get_asset_impacts( - request, - ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), - vulnerability_models=vulnerability_models, - ) + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + ) - self.assertEqual( - response.asset_impacts[0].impacts[0].hazard_type, "CoastalInundation" - ) + assert response.asset_impacts[0].impacts[0].hazard_type == "CoastalInundation" - def test_thermal_power_generation(self): - latitudes = np.array([32.6017]) - longitudes = np.array([-87.7811]) - - assets = [ - ThermalPowerGeneratingAsset( - latitude=latitudes[0], - longitude=longitudes[0], - location="North America", - capacity=1288.4, - type=archetype, - ) - for archetype in [ - "Gas", - "Gas/Gas", - "Gas/Steam", - "Gas/Steam/Dry", - "Gas/Steam/OnceThrough", - "Gas/Steam/Recirculating", - ] - ] - assets_provided_in_the_request = False +def test_risk_model_impact_request(): + """Tests the risk model functionality of the impact request.""" - request_dict = { - "assets": Assets( - items=( - [ - Asset( - asset_class=asset.__class__.__name__, - latitude=asset.latitude, - longitude=asset.longitude, - type=asset.type, - capacity=asset.capacity, - location=asset.location, - ) - for asset in 
assets - ] - if assets_provided_in_the_request - else [] - ) - ), - "include_asset_level": True, - "include_calc_details": True, - "years": [2050], - "scenarios": ["ssp585"], + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Industrial", + "location": "Asia", + "longitude": TestData.longitudes[0], + "latitude": TestData.latitudes[0], + }, + { + "asset_class": "PowerGeneratingAsset", + "type": "Nuclear", + "location": "Asia", + "longitude": TestData.longitudes[1], + "latitude": TestData.latitudes[1], + }, + ], + } + + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_measures": False, + "include_calc_details": True, + "years": [2080], + "scenarios": ["rcp8p5"], + } + + request = requests.AssetImpactRequest(**request_dict) # type: ignore + + curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163]) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + PowerGeneratingAsset: [InundationModel()], + RealEstateAsset: [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], } - - request = requests.AssetImpactRequest(**request_dict) # type: ignore - - store, root = zarr_memory_store() - - # Add mock riverine inundation data: - return_periods = [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - shape, t = shape_transform_21600_43200(return_periods=return_periods) - add_curves( - root, - longitudes, - latitudes, - "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2030", - shape, - np.array( - [ - 8.378922939300537e-05, - 0.3319014310836792, - 0.7859689593315125, - 1.30947744846344, - 1.6689927577972412, - 2.002290964126587, - 2.416414737701416, - 2.7177860736846924, - 3.008821725845337, - ] - ), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - 
"inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050", - shape, - np.array( - [ - 0.001158079132437706, - 0.3938717246055603, - 0.8549619913101196, - 1.3880255222320557, - 1.7519289255142212, - 2.0910017490386963, - 2.5129663944244385, - 2.8202412128448486, - 3.115604877471924, - ] - ), - return_periods, - t, - ) - - # Add mock drought data: - return_periods = [0.0, -1.0, -1.5, -2.0, -2.5, -3.0, -3.6] - shape, t = shape_transform_21600_43200(return_periods=return_periods) - add_curves( - root, - longitudes, - latitudes, - "drought/osc/v1/months_spei12m_below_index_MIROC6_ssp585_2050", - shape, - np.array( - [ - 6.900000095367432, - 1.7999999523162842, - 0.44999998807907104, - 0.06584064255906408, - 0.06584064255906408, - 0.0, - 0.0, - ] - ), - return_periods, - t, - ) - - return_periods = [0.0] - shape, t = shape_transform_21600_43200(return_periods=return_periods) - - # Add mock drought (Jupiter) data: - add_curves( - root, - longitudes, - latitudes, - "drought/jupiter/v1/months_spei3m_below_-2_ssp585_2050", - shape, - np.array([0.06584064255906408]), - return_periods, - t, - ) - - # Add mock water-related risk data: - add_curves( - root, - longitudes, - latitudes, - "water_risk/wri/v2/water_stress_ssp585_2050", - shape, - np.array([0.14204320311546326]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "water_risk/wri/v2/water_supply_ssp585_2050", - shape, - np.array([76.09415435791016]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "water_risk/wri/v2/water_supply_historical_1999", - shape, - np.array([88.62285614013672]), - return_periods, - t, - ) - - # Add mock chronic heat data: - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_ssp585_2050", - shape, - np.array([148.55369567871094]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_ssp585_2050", - shape, - 
np.array([65.30751037597656]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_ssp585_2050", - shape, - np.array([0.6000000238418579]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_ssp585_2050", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_ssp585_2050", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_ssp585_2050", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_ssp585_2050", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_historical_2005", - shape, - np.array([120.51940155029297]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_historical_2005", - shape, - np.array([14.839207649230957]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_historical_2005", - shape, - np.array([0.049863386899232864]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_historical_2005", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_historical_2005", - shape, - np.array([0.0]), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_historical_2005", - shape, - np.array([0.0]), - 
return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_historical_2005", - shape, - np.array([0.0]), - return_periods, - t, - ) - - # Add mock water temperature data: - return_periods = [ - 5, - 7.5, - 10, - 12.5, - 15, - 17.5, - 20, - 22.5, - 25, - 27.5, - 30, - 32.5, - 35, - 37.5, - 40, + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + ) + + assert response.asset_impacts[0].impacts[0].hazard_type == "CoastalInundation" + + +def test_thermal_power_generation(): + latitudes = np.array([32.6017]) + longitudes = np.array([-87.7811]) + + assets = [ + ThermalPowerGeneratingAsset( + latitude=latitudes[0], + longitude=longitudes[0], + location="North America", + capacity=1288.4, + type=archetype, + ) + for archetype in [ + "Gas", + "Gas/Gas", + "Gas/Steam", + "Gas/Steam/Dry", + "Gas/Steam/OnceThrough", + "Gas/Steam/Recirculating", ] - shape, t = shape_transform_21600_43200(return_periods=return_periods) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_historical_1991", - shape, - np.array( - [ - 52.0, - 51.9, - 49.666668, - 45.066666, - 38.0, - 31.1, - 26.0, - 21.066668, - 14.233334, - 8.0333338, - 5.0999999, - 2.3666666, - 6.6666669, - 3.3333335, - 0.0, - ] - ), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_rcp8p5_2050", - shape, - np.array( - [ - 51.85, - 51.5, - 50.25, - 46.75, - 41.95, - 35.35, - 29.4, - 24.55, - 20.15, - 13.85, - 6.75, - 3.5, - 1.3, - 0.25, - 0.1, - ] - ), - return_periods, - t, - ) - - # Add mock WBGT data: - return_periods = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60] - shape, t = shape_transform_21600_43200(return_periods=return_periods) - add_curves( - root, - longitudes, - latitudes, - 
"chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_ssp585_2050", - shape, - np.array( - [ - 363.65054, - 350.21094, - 303.6388, - 240.48442, - 181.82924, - 128.46844, - 74.400276, - 1.3997267, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ), - return_periods, - t, - ) - add_curves( - root, - longitudes, - latitudes, - "chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_historical_2005", - shape, - np.array( - [ - 361.95273, - 342.51804, - 278.8146, - 213.5123, - 157.4511, - 101.78238, - 12.6897545, - 0.0, - 0.0, - 0.0, - 0.0, - 0.0, - ] - ), - return_periods, - t, - ) - - source_paths = get_default_source_paths(EmbeddedInventory()) - vulnerability_models = DictBasedVulnerabilityModels( - { - ThermalPowerGeneratingAsset: [ - ThermalPowerGenerationAirTemperatureModel(), - ThermalPowerGenerationDroughtModel(), - ThermalPowerGenerationRiverineInundationModel(), - ThermalPowerGenerationWaterStressModel(), - ThermalPowerGenerationWaterTemperatureModel(), - ] - } - ) + ] - response = requests._get_asset_impacts( - request, - ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), - vulnerability_models=vulnerability_models, - assets=None if assets_provided_in_the_request else assets, - ) - - # Air Temperature - self.assertAlmostEqual( - response.asset_impacts[0].impacts[0].impact_mean, 0.0075618606988512764 - ) - self.assertAlmostEqual( - response.asset_impacts[1].impacts[0].impact_mean, 0.0075618606988512764 - ) - self.assertAlmostEqual( - response.asset_impacts[2].impacts[0].impact_mean, 0.0025192163596997963 - ) - self.assertAlmostEqual( - response.asset_impacts[3].impacts[0].impact_mean, 0.0025192163596997963 - ) - self.assertAlmostEqual(response.asset_impacts[4].impacts[0].impact_mean, 0.0) - self.assertAlmostEqual(response.asset_impacts[5].impacts[0].impact_mean, 0.0) - - # Drought - self.assertAlmostEqual( - response.asset_impacts[0].impacts[1].impact_mean, 0.0008230079663917424 - ) - self.assertAlmostEqual(response.asset_impacts[1].impacts[1].impact_mean, 0.0) - 
self.assertAlmostEqual( - response.asset_impacts[2].impacts[1].impact_mean, 0.0008230079663917424 - ) - self.assertAlmostEqual(response.asset_impacts[3].impacts[1].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[4].impacts[1].impact_mean, 0.0008230079663917424 - ) - self.assertAlmostEqual( - response.asset_impacts[5].impacts[1].impact_mean, 0.0008230079663917424 - ) - - # Riverine Inundation - self.assertAlmostEqual( - response.asset_impacts[0].impacts[2].impact_mean, 0.0046864436945997625 - ) - self.assertAlmostEqual( - response.asset_impacts[1].impacts[2].impact_mean, 0.0046864436945997625 - ) - self.assertAlmostEqual( - response.asset_impacts[2].impacts[2].impact_mean, 0.0046864436945997625 - ) - self.assertAlmostEqual( - response.asset_impacts[3].impacts[2].impact_mean, 0.0046864436945997625 - ) - self.assertAlmostEqual( - response.asset_impacts[4].impacts[2].impact_mean, 0.0046864436945997625 - ) - self.assertAlmostEqual( - response.asset_impacts[5].impacts[2].impact_mean, 0.0046864436945997625 - ) + assets_provided_in_the_request = False - # Water Stress - self.assertAlmostEqual( - response.asset_impacts[0].impacts[3].impact_mean, 0.010181435900296947 - ) - self.assertAlmostEqual(response.asset_impacts[1].impacts[3].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[2].impacts[3].impact_mean, 0.010181435900296947 - ) - self.assertAlmostEqual(response.asset_impacts[3].impacts[3].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[4].impacts[3].impact_mean, 0.010181435900296947 - ) - self.assertAlmostEqual( - response.asset_impacts[5].impacts[3].impact_mean, 0.010181435900296947 - ) - - # Water Temperature - self.assertAlmostEqual( - response.asset_impacts[0].impacts[4].impact_mean, 0.1448076958069578 - ) - self.assertAlmostEqual(response.asset_impacts[1].impacts[4].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[2].impacts[4].impact_mean, 0.1448076958069578 - ) - 
self.assertAlmostEqual(response.asset_impacts[3].impacts[4].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[4].impacts[4].impact_mean, 0.1448076958069578 - ) - self.assertAlmostEqual( - response.asset_impacts[5].impacts[4].impact_mean, 0.005896707722257193 - ) - - vulnerability_models = DictBasedVulnerabilityModels( - { - ThermalPowerGeneratingAsset: [ - ThermalPowerGenerationDroughtModel( - impact_based_on_a_single_point=True - ), + request_dict = { + "assets": Assets( + items=( + [ + Asset( + asset_class=asset.__class__.__name__, + latitude=asset.latitude, + longitude=asset.longitude, + type=asset.type, + capacity=asset.capacity, + location=asset.location, + ) + for asset in assets ] - } - ) - - response = requests._get_asset_impacts( - request, - ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), - vulnerability_models=vulnerability_models, - assets=None if assets_provided_in_the_request else assets, - ) - - # Drought (Jupiter) - self.assertAlmostEqual( - response.asset_impacts[0].impacts[0].impact_mean, 0.0005859470850072303 - ) - self.assertAlmostEqual(response.asset_impacts[1].impacts[0].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[2].impacts[0].impact_mean, 0.0005859470850072303 - ) - self.assertAlmostEqual(response.asset_impacts[3].impacts[0].impact_mean, 0.0) - self.assertAlmostEqual( - response.asset_impacts[4].impacts[0].impact_mean, 0.0005859470850072303 - ) - self.assertAlmostEqual( - response.asset_impacts[5].impacts[0].impact_mean, 0.0005859470850072303 - ) - - @unittest.skip("example, not test") - def test_example_portfolios(self): - example_portfolios = requests._get_example_portfolios() - for assets in example_portfolios: - request_dict = { - "assets": assets, - "include_asset_level": True, - "include_calc_details": False, - "years": [2030, 2040, 2050], - "scenarios": ["ssp585"], - } - container = Container() - requester = container.requester() - response = requester.get( - 
request_id="get_asset_impact", request_dict=request_dict + if assets_provided_in_the_request + else [] ) - with open("out.json", "w") as f: - f.write(response) - assert response is not None - - @unittest.skip("example, not test") - def test_example_portfolios_risk_measures(self): - assets = { - "items": [ - { - "asset_class": "RealEstateAsset", - "type": "Buildings/Commercial", - "location": "Europe", - "longitude": 11.5391, - "latitude": 48.1485, - } - ], + ), + "include_asset_level": True, + "include_calc_details": True, + "years": [2050], + "scenarios": ["ssp585"], + } + + request = requests.AssetImpactRequest(**request_dict) # type: ignore + + store, root = zarr_memory_store() + + # Add mock riverine inundation data: + return_periods = [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2030", + shape, + np.array( + [ + 8.378922939300537e-05, + 0.3319014310836792, + 0.7859689593315125, + 1.30947744846344, + 1.6689927577972412, + 2.002290964126587, + 2.416414737701416, + 2.7177860736846924, + 3.008821725845337, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "inundation/wri/v2/inunriver_rcp8p5_MIROC-ESM-CHEM_2050", + shape, + np.array( + [ + 0.001158079132437706, + 0.3938717246055603, + 0.8549619913101196, + 1.3880255222320557, + 1.7519289255142212, + 2.0910017490386963, + 2.5129663944244385, + 2.8202412128448486, + 3.115604877471924, + ] + ), + return_periods, + t, + ) + + # Add mock drought data: + return_periods = [0.0, -1.0, -1.5, -2.0, -2.5, -3.0, -3.6] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "drought/osc/v1/months_spei12m_below_index_MIROC6_ssp585_2050", + shape, + np.array( + [ + 6.900000095367432, + 1.7999999523162842, + 0.44999998807907104, + 
0.06584064255906408, + 0.06584064255906408, + 0.0, + 0.0, + ] + ), + return_periods, + t, + ) + + return_periods = [0.0] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + + # Add mock drought (Jupiter) data: + add_curves( + root, + longitudes, + latitudes, + "drought/jupiter/v1/months_spei3m_below_-2_ssp585_2050", + shape, + np.array([0.06584064255906408]), + return_periods, + t, + ) + + # Add mock water-related risk data: + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_stress_ssp585_2050", + shape, + np.array([0.14204320311546326]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_supply_ssp585_2050", + shape, + np.array([76.09415435791016]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "water_risk/wri/v2/water_supply_historical_1999", + shape, + np.array([88.62285614013672]), + return_periods, + t, + ) + + # Add mock chronic heat data: + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_ssp585_2050", + shape, + np.array([148.55369567871094]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_ssp585_2050", + shape, + np.array([65.30751037597656]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.6000000238418579]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + 
"chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_ssp585_2050", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_25c_ACCESS-CM2_historical_2005", + shape, + np.array([120.51940155029297]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_30c_ACCESS-CM2_historical_2005", + shape, + np.array([14.839207649230957]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_35c_ACCESS-CM2_historical_2005", + shape, + np.array([0.049863386899232864]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_40c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_45c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_50c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_tas_above_55c_ACCESS-CM2_historical_2005", + shape, + np.array([0.0]), + return_periods, + t, + ) + + # Add mock water temperature data: + return_periods = [ + 5, + 7.5, + 10, + 12.5, + 15, + 17.5, + 20, + 22.5, + 25, + 27.5, + 30, + 32.5, + 35, + 37.5, + 40, + ] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_historical_1991", + shape, + np.array( + [ + 52.0, + 51.9, + 49.666668, + 45.066666, + 38.0, + 
31.1, + 26.0, + 21.066668, + 14.233334, + 8.0333338, + 5.0999999, + 2.3666666, + 6.6666669, + 3.3333335, + 0.0, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/nluu/v2/weeks_water_temp_above_GFDL_rcp8p5_2050", + shape, + np.array( + [ + 51.85, + 51.5, + 50.25, + 46.75, + 41.95, + 35.35, + 29.4, + 24.55, + 20.15, + 13.85, + 6.75, + 3.5, + 1.3, + 0.25, + 0.1, + ] + ), + return_periods, + t, + ) + + # Add mock WBGT data: + return_periods = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60] + shape, t = shape_transform_21600_43200(return_periods=return_periods) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_ssp585_2050", + shape, + np.array( + [ + 363.65054, + 350.21094, + 303.6388, + 240.48442, + 181.82924, + 128.46844, + 74.400276, + 1.3997267, + 0.0, + 0.0, + 0.0, + 0.0, + ] + ), + return_periods, + t, + ) + add_curves( + root, + longitudes, + latitudes, + "chronic_heat/osc/v2/days_wbgt_above_ACCESS-CM2_historical_2005", + shape, + np.array( + [ + 361.95273, + 342.51804, + 278.8146, + 213.5123, + 157.4511, + 101.78238, + 12.6897545, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ] + ), + return_periods, + t, + ) + + source_paths = get_default_source_paths(EmbeddedInventory()) + vulnerability_models = DictBasedVulnerabilityModels( + { + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationWaterStressModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ] + } + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + assets=None if assets_provided_in_the_request else assets, + ) + + # Air Temperature + assert response.asset_impacts[0].impacts[0].impact_mean == pytest.approx( + 0.0075618606988512764 + ) + assert 
response.asset_impacts[1].impacts[0].impact_mean == pytest.approx( + 0.0075618606988512764 + ) + assert response.asset_impacts[2].impacts[0].impact_mean == pytest.approx( + 0.0025192163596997963 + ) + assert response.asset_impacts[3].impacts[0].impact_mean == pytest.approx( + 0.0025192163596997963 + ) + assert response.asset_impacts[4].impacts[0].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[5].impacts[0].impact_mean == pytest.approx(0.0) + + # Drought + assert response.asset_impacts[0].impacts[1].impact_mean == pytest.approx( + 0.0008230079663917424 + ) + assert response.asset_impacts[1].impacts[1].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[2].impacts[1].impact_mean == pytest.approx( + 0.0008230079663917424 + ) + assert response.asset_impacts[3].impacts[1].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[4].impacts[1].impact_mean == pytest.approx( + 0.0008230079663917424 + ) + assert response.asset_impacts[5].impacts[1].impact_mean == pytest.approx( + 0.0008230079663917424 + ) + + # Riverine Inundation + assert response.asset_impacts[0].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + assert response.asset_impacts[1].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + assert response.asset_impacts[2].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + assert response.asset_impacts[3].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + assert response.asset_impacts[4].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + assert response.asset_impacts[5].impacts[2].impact_mean == pytest.approx( + 0.0046864436945997625 + ) + + # Water Stress + assert response.asset_impacts[0].impacts[3].impact_mean == pytest.approx( + 0.010181435900296947 + ) + assert response.asset_impacts[1].impacts[3].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[2].impacts[3].impact_mean == pytest.approx( + 
0.010181435900296947 + ) + assert response.asset_impacts[3].impacts[3].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[4].impacts[3].impact_mean == pytest.approx( + 0.010181435900296947 + ) + assert response.asset_impacts[5].impacts[3].impact_mean == pytest.approx( + 0.010181435900296947 + ) + + # Water Temperature + assert response.asset_impacts[0].impacts[4].impact_mean == pytest.approx( + 0.1448076958069578 + ) + assert response.asset_impacts[1].impacts[4].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[2].impacts[4].impact_mean == pytest.approx( + 0.1448076958069578 + ) + assert response.asset_impacts[3].impacts[4].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[4].impacts[4].impact_mean == pytest.approx( + 0.1448076958069578 + ) + assert response.asset_impacts[5].impacts[4].impact_mean == pytest.approx( + 0.005896707722257193 + ) + + vulnerability_models = DictBasedVulnerabilityModels( + { + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationDroughtModel(impact_based_on_a_single_point=True), + ] } - # 48.1485°, 11.5391° - # 48.1537°, 11.5852° + ) + + response = requests._get_asset_impacts( + request, + ZarrHazardModel(source_paths=source_paths, reader=ZarrReader(store)), + vulnerability_models=vulnerability_models, + assets=None if assets_provided_in_the_request else assets, + ) + + # Drought (Jupiter) + assert response.asset_impacts[0].impacts[0].impact_mean == pytest.approx( + 0.0005859470850072303 + ) + assert response.asset_impacts[1].impacts[0].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[2].impacts[0].impact_mean == pytest.approx( + 0.0005859470850072303 + ) + assert response.asset_impacts[3].impacts[0].impact_mean == pytest.approx(0.0) + assert response.asset_impacts[4].impacts[0].impact_mean == pytest.approx( + 0.0005859470850072303 + ) + assert response.asset_impacts[5].impacts[0].impact_mean == pytest.approx( + 0.0005859470850072303 + ) + + +def 
test_example_portfolios(test_dir, load_credentials): + example_portfolios = requests._get_example_portfolios() + for assets in example_portfolios: request_dict = { "assets": assets, "include_asset_level": True, - "include_calc_details": True, - "include_measures": True, + "include_calc_details": False, "years": [2030, 2040, 2050], - "scenarios": ["ssp245", "ssp585"], # ["ssp126", "ssp245", "ssp585"], + "scenarios": ["ssp585"], } container = Container() requester = container.requester() response = requester.get( request_id="get_asset_impact", request_dict=request_dict ) - risk_measures_dict = json.loads(response)["risk_measures"] - helper = RiskMeasuresHelper( - TypeAdapter(RiskMeasures).validate_python(risk_measures_dict) - ) - for hazard_type in [ - "RiverineInundation", - "CoastalInundation", - "ChronicHeat", - "Wind", - ]: - scores, measure_values, measure_defns = helper.get_measure( - hazard_type, "ssp585", 2050 - ) - label, description = helper.get_score_details(scores[0], measure_defns[0]) - print(label) + with open(os.path.join(test_dir, "out.json"), "w") as f: + f.write(response) + assert response is not None + + +@pytest.mark.skip(reason="Example, not test.") +def test_example_portfolios_risk_measures(load_credentials): + assets = { + "items": [ + { + "asset_class": "RealEstateAsset", + "type": "Buildings/Commercial", + "location": "Europe", + "longitude": 11.5391, + "latitude": 48.1485, + } + ], + } + # 48.1485°, 11.5391° + # 48.1537°, 11.5852° + request_dict = { + "assets": assets, + "include_asset_level": True, + "include_calc_details": True, + "include_measures": True, + "years": [2030, 2040, 2050], + "scenarios": ["ssp245", "ssp585"], # ["ssp126", "ssp245", "ssp585"], + } + container = Container() + requester = container.requester() + response = requester.get(request_id="get_asset_impact", request_dict=request_dict) + risk_measures_dict = json.loads(response)["risk_measures"] + helper = RiskMeasuresHelper( + 
TypeAdapter(RiskMeasures).validate_python(risk_measures_dict) + ) + for hazard_type in [ + "RiverineInundation", + "CoastalInundation", + "ChronicHeat", + "Wind", + ]: + scores, measure_values, measure_defns = helper.get_measure( + hazard_type, "ssp585", 2050 + ) + label, description = helper.get_score_details(scores[0], measure_defns[0]) + print(label) diff --git a/tests/api/wri_global_power_plant_database.tbz2 b/tests/api/wri_global_power_plant_database.tbz2 new file mode 100644 index 00000000..6846bf92 Binary files /dev/null and b/tests/api/wri_global_power_plant_database.tbz2 differ diff --git a/tests/base_test.py b/tests/base_test.py deleted file mode 100644 index 5e72175a..00000000 --- a/tests/base_test.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import pathlib -import shutil -import tempfile -import unittest - -from dotenv import load_dotenv - - -class TestWithCredentials(unittest.TestCase): - """Test that attempts to load contents of credentials.env into environment variables (if present)""" - - def setUp(self): - self.test_dir = tempfile.mkdtemp() - dotenv_dir = os.environ.get("CREDENTIAL_DOTENV_DIR", os.getcwd()) - dotenv_path = pathlib.Path(dotenv_dir) / "credentials.env" - if os.path.exists(dotenv_path): - load_dotenv(dotenv_path=dotenv_path, override=True) - - def tearDown(self): - shutil.rmtree(self.test_dir) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..92b87ad6 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,69 @@ +import os +import pathlib +import shutil +import tarfile +import tempfile + +import pandas as pd +import pytest +from dotenv import load_dotenv + + +@pytest.fixture(scope="function") +def test_dir(): + # Setup + test_dir = tempfile.mkdtemp() + yield test_dir + # Teardown + shutil.rmtree(test_dir) + + +@pytest.fixture( + scope="function", +) +def load_credentials(): + dotenv_dir = os.environ.get("CREDENTIAL_DOTENV_DIR", os.getcwd()) + dotenv_path = pathlib.Path(dotenv_dir) / "credentials.env" + 
if os.path.exists(dotenv_path): + load_dotenv(dotenv_path=dotenv_path, override=True) + + +@pytest.fixture( + scope="function", +) +def wri_power_plant_assets(): + """ + Load the WRI Global Power Plant Database dataset. + + This function extracts the `global_power_plant_database.csv` file from the + `wri_global_power_plant_database.tbz2` archive and loads it into a pandas DataFrame. + + The dataset is sourced from the World Resources Institute's (WRI) Global Power Plant Database. + The original dataset and more information can be found at: + https://datasets.wri.org/dataset/globalpowerplantdatabase + + License: + The dataset is provided under the Creative Commons Attribution 4.0 International (CC BY 4.0) license. + This means you are free to: + - Share: copy and redistribute the material in any medium or format + - Adapt: remix, transform, and build upon the material for any purpose, even commercially. + + Under the following terms: + - Attribution: You must give appropriate credit, provide a link to the license, and indicate if changes were made. + You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. + + More details about the license can be found at: https://creativecommons.org/licenses/by/4.0/ + + Note: + The original README file and a copy of the CC BY 4.0 license are included in the tar.bz2 archive. + + Returns: + pandas.DataFrame: A DataFrame containing the Global Power Plant Database. 
+ """ + with tarfile.open( + "./tests/api/wri_global_power_plant_database.tbz2", "r:bz2" + ) as tf: + with tf.extractfile("global_power_plant_database.csv") as f: + asset_list = pd.read_csv(f, low_memory=False) + + return asset_list diff --git a/tests/data/events_retrieval_test.py b/tests/data/events_retrieval_test.py index 9f5f02dd..c0ed277c 100644 --- a/tests/data/events_retrieval_test.py +++ b/tests/data/events_retrieval_test.py @@ -1,9 +1,6 @@ import os -import unittest -# import fsspec.implementations.local as local # type: ignore import numpy as np -import numpy.testing import scipy.interpolate import zarr from fsspec.implementations.memory import MemoryFileSystem @@ -20,285 +17,274 @@ from physrisk.data.zarr_reader import ZarrReader from physrisk.kernel.hazard_model import HazardDataRequest from physrisk.kernel.hazards import RiverineInundation +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.requests import _get_hazard_data_availability -# from pathlib import PurePosixPath -from ..base_test import TestWithCredentials from ..data.hazard_model_store_test import ( ZarrStoreMocker, mock_hazard_model_store_inundation, ) -class TestEventRetrieval(TestWithCredentials): - @unittest.skip("S3 access needed") - def test_inventory_change(self): - # check validation passes calling in service-like way - embedded = EmbeddedInventory() - resources1 = embedded.to_resources() - inventory = Inventory(resources1).json_ordered() - with open( - os.path.join(os.path.dirname(os.path.abspath(__file__)), "inventory.json"), - "w", - ) as f: - f.write(inventory) - - def test_hazard_data_availability_summary(self): - # check validation passes calling in service-like way - inventory = EmbeddedInventory() - response = _get_hazard_data_availability( - HazardAvailabilityRequest(sources=["embedded"]), - inventory, - inventory.colormaps(), - ) # , "hazard_test"]) - assert len(response.models) > 0 # rely on Pydantic validation for test - - def 
test_set_get_inventory(self): - fs = MemoryFileSystem() - reader = InventoryReader(fs=fs) - reader.append("hazard_test", [self._test_hazard_model()]) - assert reader.read("hazard_test")[0].indicator_id == "test_indicator_id" - - @unittest.skip("S3 access needed") - def test_set_get_inventory_s3(self): - reader = InventoryReader() - reader.append("hazard_test", [self._test_hazard_model()]) - assert reader.read("hazard_test")[0].id == "test_indicator_id" - - def _test_hazard_model(self): - return HazardResource( - hazard_type="TestHazardType", - indicator_id="test_indicator_id", - indicator_model_gcm="test_gcm", - path="test_array_path", - display_name="Test hazard indicator", - description="Description of test hazard indicator", - scenarios=[Scenario(id="historical", years=[2010])], - units="K", - ) +def test_inventory_change(test_dir): + embedded = EmbeddedInventory() + resources1 = embedded.resources.values() + inventory = Inventory(resources1).json_ordered() + with open( + os.path.join(test_dir, "inventory.json"), + "w", + ) as f: + f.write(inventory) - def test_zarr_bilinear(self): - # create suitable asymmetric data set and compare with scipy - xt, yt = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10)) - data0 = np.exp(-(xt**2 / 25.0 + yt**2 / 16.0)) - data1 = np.exp(-(xt**2 / 36.0 + yt**2 / 25.0)) +def test_hazard_data_availability_summary(): + inventory = EmbeddedInventory() + response = _get_hazard_data_availability( + HazardAvailabilityRequest(sources=["embedded"]), + inventory, + inventory.colormaps(), + ) + assert len(response.models) > 0 - data = np.stack([data0, data1], axis=0) - # note that zarr array has index [z, y, x], e.g. 
9, 21600, 43200 or [index, lat, lon] - y = np.array([1.4, 2.8, 3.4]) # row indices - x = np.array([3.2, 6.7, 7.9]) # column indices - image_coords = np.stack([x, y]) - data_zarr = zarr.array(data) - candidate_lin = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0, 1]), interpolation="linear" - ) - candidate_max = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0, 1]), interpolation="max" - ) - candidate_min = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0, 1]), interpolation="min" - ) +def test_set_get_inventory(): + fs = MemoryFileSystem() + reader = InventoryReader(fs=fs) + reader.append("hazard_test", [_test_hazard_model()]) + assert reader.read("hazard_test")[0].indicator_id == "test_indicator_id" - image_coords_surr = np.stack( - [ - np.concatenate( - [np.floor(x), np.floor(x) + 1, np.floor(x), np.floor(x) + 1] - ), - np.concatenate( - [np.floor(y), np.floor(y), np.floor(y) + 1, np.floor(y) + 1] - ), - ] - ) - values_surr = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords_surr, np.array([0, 1]), interpolation="linear" - ).reshape((4, 3, 2)) - interp_scipy0 = scipy.interpolate.RectBivariateSpline( - np.linspace(0, 9, 10), np.linspace(0, 9, 10), data0.T, kx=1, ky=1 - ) - interp_scipy1 = scipy.interpolate.RectBivariateSpline( - np.linspace(0, 9, 10), np.linspace(0, 9, 10), data1.T, kx=1, ky=1 - ) - expected0_lin = interp_scipy0(x, y).diagonal().reshape(len(y)) - expected1_lin = interp_scipy1(x, y).diagonal().reshape(len(y)) - expected0_max = np.max(values_surr[:, :, 0], axis=0) - expected1_max = np.max(values_surr[:, :, 1], axis=0) - expected0_min = np.min(values_surr[:, :, 0], axis=0) - expected1_min = np.min(values_surr[:, :, 1], axis=0) - - numpy.testing.assert_allclose(candidate_lin[:, 0], expected0_lin, rtol=1e-6) - numpy.testing.assert_allclose(candidate_lin[:, 1], expected1_lin, rtol=1e-6) - 
numpy.testing.assert_allclose(candidate_max[:, 0], expected0_max, rtol=1e-6) - numpy.testing.assert_allclose(candidate_max[:, 1], expected1_max, rtol=1e-6) - numpy.testing.assert_allclose(candidate_min[:, 0], expected0_min, rtol=1e-6) - numpy.testing.assert_allclose(candidate_min[:, 1], expected1_min, rtol=1e-6) - - def test_zarr_bilinear_with_bad_data(self): - # create suitable asymmetric data set and compare with scipy - - xt, yt = np.meshgrid(np.linspace(0, 1, 2), np.linspace(0, 1, 2)) - data = np.array([[[1.0, -9999.0], [2.0, 0.0]]]) - - # note that zarr array has index [z, y, x], e.g. 9, 21600, 43200 or [index, lat, lon] - y = np.array([0.4, 0.5, 0.8]) # row indices - x = np.array([0.1, 0.6, 0.7]) # column indices - image_coords = np.stack([x, y]) - data_zarr = zarr.array(data) - - candidate_lin = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0]), interpolation="linear" - ).flatten() - candidate_max = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0]), interpolation="max" - ).flatten() - candidate_min = ZarrReader._linear_interp_frac_coordinates( - data_zarr, image_coords, np.array([0]), interpolation="min" - ).flatten() - - expected_lin = np.array([1.34042553, 0.85714286, 0.62790698]) - expected_max = np.array([2.0, 2.0, 2.0]) - expected_min = np.array([0.0, 0.0, 0.0]) - - numpy.testing.assert_allclose(candidate_lin, expected_lin, rtol=1e-6) - numpy.testing.assert_allclose(candidate_max, expected_max, rtol=1e-6) - numpy.testing.assert_allclose(candidate_min, expected_min, rtol=1e-6) - - def test_zarr_geomax_on_grid(self): - lons_ = np.array([3.92783]) - lats_ = np.array([50.882394]) - curve = np.array( - [ - 0.00, - 0.06997928, - 0.2679602, - 0.51508933, - 0.69842442, - 0.88040525, - 1.11911115, - 1.29562478, - 1.47200677, - ] - ) - set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080" - interpolation = "linear" - delta_km = 0.100 - n_grid = 11 - store_ = 
mock_hazard_model_store_inundation(lons_, lats_, curve) - zarrreader_ = ZarrReader(store_) - - lons_ = np.array([3.92916667, 3.925] + list(lons_)) - lats_ = np.array([50.87916667, 50.88333333] + list(lats_)) - curves_max_candidate, _ = zarrreader_.get_max_curves_on_grid( - set_id, - lons_, - lats_, - interpolation=interpolation, - delta_km=delta_km, - n_grid=n_grid, - ) - curves_max_expected = np.array( - [ - curve, - [ - 0.0, - 0.02272942, - 0.08703404, - 0.16730212, - 0.22684974, - 0.28595751, - 0.3634897, - 0.42082168, - 0.47811095, - ], - [ - 0.0, - 0.0432026, - 0.16542863, - 0.31799695, - 0.43118118, - 0.54352937, - 0.69089751, - 0.7998704, - 0.90876211, - ], - ] - ) - numpy.testing.assert_allclose( - curves_max_candidate, curves_max_expected, rtol=1e-6 - ) +def test_set_get_inventory_s3(load_credentials): + reader = InventoryReader() + reader.append("hazard_test", [_test_hazard_model()]) + assert reader.read("hazard_test")[0].indicator_id == "test_indicator_id" - def test_zarr_geomax(self): - longitudes = np.array([3.926]) - latitudes = np.array([50.878]) - curve = np.array( - [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4], - ) - set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080" - delta_deg = 0.1 - shapes = [ - Polygon( - ( - (x - 0.5 * delta_deg, y - 0.5 * delta_deg), - (x - 0.5 * delta_deg, y + 0.5 * delta_deg), - (x + 0.5 * delta_deg, y + 0.5 * delta_deg), - (x + 0.5 * delta_deg, y - 0.5 * delta_deg), - ) - ) - for x, y in zip(longitudes, latitudes) + +def _test_hazard_model(): + return HazardResource( + hazard_type="TestHazardType", + indicator_id="test_indicator_id", + indicator_model_gcm="test_gcm", + path="test_array_path", + display_name="Test hazard indicator", + description="Description of test hazard indicator", + scenarios=[Scenario(id="historical", years=[2010])], + units="K", + ) + + +def test_zarr_bilinear(): + xt, yt = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10)) + data0 = np.exp(-(xt**2 / 25.0 + yt**2 / 16.0)) 
+ data1 = np.exp(-(xt**2 / 36.0 + yt**2 / 25.0)) + + data = np.stack([data0, data1], axis=0) + + y = np.array([1.4, 2.8, 3.4]) + x = np.array([3.2, 6.7, 7.9]) + image_coords = np.stack([x, y]) + data_zarr = zarr.array(data) + candidate_lin = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="linear" + ) + candidate_max = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="max" + ) + candidate_min = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0, 1]), interpolation="min" + ) + + image_coords_surr = np.stack( + [ + np.concatenate( + [np.floor(x), np.floor(x) + 1, np.floor(x), np.floor(x) + 1] + ), + np.concatenate( + [np.floor(y), np.floor(y), np.floor(y) + 1, np.floor(y) + 1] + ), ] - store = mock_hazard_model_store_inundation(longitudes, latitudes, curve) - zarr_reader = ZarrReader(store) - curves_max_expected = np.array([[0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]]) + ) + values_surr = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords_surr, np.array([0, 1]), interpolation="linear" + ).reshape((4, 3, 2)) + interp_scipy0 = scipy.interpolate.RectBivariateSpline( + np.linspace(0, 9, 10), np.linspace(0, 9, 10), data0.T, kx=1, ky=1 + ) + interp_scipy1 = scipy.interpolate.RectBivariateSpline( + np.linspace(0, 9, 10), np.linspace(0, 9, 10), data1.T, kx=1, ky=1 + ) + expected0_lin = interp_scipy0(x, y).diagonal().reshape(len(y)) + expected1_lin = interp_scipy1(x, y).diagonal().reshape(len(y)) + expected0_max = np.max(values_surr[:, :, 0], axis=0) + expected1_max = np.max(values_surr[:, :, 1], axis=0) + expected0_min = np.min(values_surr[:, :, 0], axis=0) + expected1_min = np.min(values_surr[:, :, 1], axis=0) - curves_max_candidate, _, _ = zarr_reader.get_max_curves( - set_id, shapes, interpolation="floor" - ) - numpy.testing.assert_allclose( - curves_max_candidate, curves_max_expected, rtol=1e-6 - ) + 
np.testing.assert_allclose(candidate_lin[:, 0], expected0_lin, rtol=1e-6) + np.testing.assert_allclose(candidate_lin[:, 1], expected1_lin, rtol=1e-6) + np.testing.assert_allclose(candidate_max[:, 0], expected0_max, rtol=1e-6) + np.testing.assert_allclose(candidate_max[:, 1], expected1_max, rtol=1e-6) + np.testing.assert_allclose(candidate_min[:, 0], expected0_min, rtol=1e-6) + np.testing.assert_allclose(candidate_min[:, 1], expected1_min, rtol=1e-6) - curves_max_candidate, _, _ = zarr_reader.get_max_curves( - set_id, shapes, interpolation="linear" - ) - numpy.testing.assert_allclose( - curves_max_candidate, curves_max_expected / 4, rtol=1e-6 - ) - def test_reproject(self): - """Test adding data in a non-ESPG-4326 coordinate reference system. Check that attribute - end in the correct convertion.""" - mocker = ZarrStoreMocker() - lons = [1.1, -0.31] - lats = [47.0, 52.0] - mocker._add_curves( - "test", - lons, - lats, - "epsg:3035", - [3, 39420, 38371], - [100.0, 0.0, 2648100.0, 0.0, -100.0, 5404500], - [10.0, 100.0, 1000.0], - [1.0, 2.0, 3.0], - ) +def test_zarr_bilinear_with_bad_data(): + xt, yt = np.meshgrid(np.linspace(0, 1, 2), np.linspace(0, 1, 2)) + data = np.array([[[1.0, -9999.0], [2.0, 0.0]]]) + + y = np.array([0.4, 0.5, 0.8]) + x = np.array([0.1, 0.6, 0.7]) + image_coords = np.stack([x, y]) + data_zarr = zarr.array(data) + + candidate_lin = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0]), interpolation="linear" + ).flatten() + candidate_max = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0]), interpolation="max" + ).flatten() + candidate_min = ZarrReader._linear_interp_frac_coordinates( + data_zarr, image_coords, np.array([0]), interpolation="min" + ).flatten() + + expected_lin = np.array([1.34042553, 0.85714286, 0.62790698]) + expected_max = np.array([2.0, 2.0, 2.0]) + expected_min = np.array([0.0, 0.0, 0.0]) - source_paths = { - RiverineInundation: lambda indicator_id, scenario, 
year, hint: "test" - } - hazard_model = ZarrHazardModel(source_paths=source_paths, store=mocker.store) - response = hazard_model.get_hazard_events( + np.testing.assert_allclose(candidate_lin, expected_lin, rtol=1e-6) + np.testing.assert_allclose(candidate_max, expected_max, rtol=1e-6) + np.testing.assert_allclose(candidate_min, expected_min, rtol=1e-6) + + +def test_zarr_geomax_on_grid(): + lons_ = np.array([3.92783]) + lats_ = np.array([50.882394]) + curve = np.array( + [ + 0.00, + 0.06997928, + 0.2679602, + 0.51508933, + 0.69842442, + 0.88040525, + 1.11911115, + 1.29562478, + 1.47200677, + ] + ) + set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080" + interpolation = "linear" + delta_km = 0.100 + n_grid = 11 + store_ = mock_hazard_model_store_inundation(lons_, lats_, curve) + zarrreader_ = ZarrReader(store_) + + lons_ = np.array([3.92916667, 3.925] + list(lons_)) + lats_ = np.array([50.87916667, 50.88333333] + list(lats_)) + curves_max_candidate, _ = zarrreader_.get_max_curves_on_grid( + set_id, + lons_, + lats_, + interpolation=interpolation, + delta_km=delta_km, + n_grid=n_grid, + ) + + curves_max_expected = np.array( + [ + curve, [ - HazardDataRequest( - RiverineInundation, - lons[0], - lats[0], - indicator_id="", - scenario="", - year=2050, - ) - ] - ) - numpy.testing.assert_equal( - next(iter(response.values())).intensities, [1.0, 2.0, 3.0] + 0.0, + 0.02272942, + 0.08703404, + 0.16730212, + 0.22684974, + 0.28595751, + 0.3634897, + 0.42082168, + 0.47811095, + ], + [ + 0.0, + 0.0432026, + 0.16542863, + 0.31799695, + 0.43118118, + 0.54352937, + 0.69089751, + 0.7998704, + 0.90876211, + ], + ] + ) + np.testing.assert_allclose(curves_max_candidate, curves_max_expected, rtol=1e-6) + + +def test_zarr_geomax(): + longitudes = np.array([3.926]) + latitudes = np.array([50.878]) + curve = np.array( + [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4], + ) + set_id = r"inundation/wri/v2\\inunriver_rcp8p5_MIROC-ESM-CHEM_2080" + delta_deg = 0.1 + shapes = [ + 
Polygon( + ( + (x - 0.5 * delta_deg, y - 0.5 * delta_deg), + (x - 0.5 * delta_deg, y + 0.5 * delta_deg), + (x + 0.5 * delta_deg, y + 0.5 * delta_deg), + (x + 0.5 * delta_deg, y - 0.5 * delta_deg), + ) ) + for x, y in zip(longitudes, latitudes) + ] + store = mock_hazard_model_store_inundation(longitudes, latitudes, curve) + zarr_reader = ZarrReader(store) + curves_max_expected = np.array([[0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4]]) + + curves_max_candidate, _, _ = zarr_reader.get_max_curves( + set_id, shapes, interpolation="floor" + ) + np.testing.assert_allclose(curves_max_candidate, curves_max_expected, rtol=1e-6) + + curves_max_candidate, _, _ = zarr_reader.get_max_curves( + set_id, shapes, interpolation="linear" + ) + np.testing.assert_allclose(curves_max_candidate, curves_max_expected / 4, rtol=1e-6) + + +def test_reproject(): + """Test adding data in a non-ESPG-4326 coordinate reference system. Check that attribute + end in the correct convertion.""" + mocker = ZarrStoreMocker() + lons = [1.1, -0.31] + lats = [47.0, 52.0] + mocker._add_curves( + "test", + lons, + lats, + "epsg:3035", + [3, 39420, 38371], + [100.0, 0.0, 2648100.0, 0.0, -100.0, 5404500], + [10.0, 100.0, 1000.0], + [1.0, 2.0, 3.0], + ) + + source_paths = { + RiverineInundation: lambda indicator_id, scenario, year, hint: "test" + } + hazard_model = ZarrHazardModel(source_paths=source_paths, store=mocker.store) + response = hazard_model.get_hazard_events( + [ + HazardDataRequest( + RiverineInundation, + lons[0], + lats[0], + indicator_id="", + scenario="", + year=2050, + ) + ] + ) + np.testing.assert_equal(next(iter(response.values())).intensities, [1.0, 2.0, 3.0]) diff --git a/tests/data/static_data_test.py b/tests/data/static_data_test.py index cc9f57c7..f121b8f9 100644 --- a/tests/data/static_data_test.py +++ b/tests/data/static_data_test.py @@ -1,29 +1,25 @@ -import unittest +import os from physrisk.data.static.world import ( World, get_countries_and_continents, get_countries_json, ) - from 
..data.hazard_model_store_test import TestData -class TestStaticDate(unittest.TestCase): - @unittest.skip( - "example that requires geopandas (consider adding for running tests only)" +def test_get_countries_and_continents(): + countries, continents = get_countries_and_continents( + TestData.longitudes, TestData.latitudes ) - def test_get_countries_and_continents(self): - countries, continents = get_countries_and_continents( - TestData.longitudes, TestData.latitudes - ) - self.assertEqual(countries[0:3], ["Afghanistan", "Afghanistan", "Albania"]) + assert countries[0:3] == ["Afghanistan", "Afghanistan", "Albania"] + + +def test_get_countries_json(test_dir): + with open(os.path.join(test_dir, "world.json"), "w") as f: + world_json = get_countries_json() + f.write(world_json) - @unittest.skip("not really a test; just showing how world.json was generated") - def test_get_countries_json(self): - with open("world.json", "w") as f: - world_json = get_countries_json() - f.write(world_json) - def test_get_load_world(self): - self.assertEqual(World.countries["United Kingdom"].continent, "Europe") +def test_get_load_world(): + assert World.countries["United Kingdom"].continent == "Europe" diff --git a/tests/kernel/asset_impact_test.py b/tests/kernel/asset_impact_test.py index 5726f12a..5d1eb5f6 100644 --- a/tests/kernel/asset_impact_test.py +++ b/tests/kernel/asset_impact_test.py @@ -1,6 +1,6 @@ """Test asset impact calculations.""" -import unittest +import pytest import numpy as np @@ -9,6 +9,7 @@ from physrisk.kernel.hazard_event_distrib import HazardEventDistrib from physrisk.kernel.hazard_model import HazardDataRequest from physrisk.kernel.hazards import RiverineInundation +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import ImpactDistrib from physrisk.kernel.vulnerability_distrib import VulnerabilityDistrib from physrisk.vulnerability_models.real_estate_models import ( @@ -17,176 +18,156 @@ ) -class 
TestAssetImpact(unittest.TestCase): - """Tests asset impact calculations.""" - - def test_impact_curve(self): - """Testing the generation of an asset when only an impact curve (e.g. damage curve is available)""" - - # exceedance curve - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) - exceed_probs = 1.0 / return_periods - depths = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] - ) - curve = ExceedanceCurve(exceed_probs, depths) - - # impact curve - vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) - vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) - - # say we need to add an extra depth point because the damage below that inundation depth is zero - cutoff_depth = 0.9406518 # 0.75 - curve = curve.add_value_point(cutoff_depth) - # we could also choose ensure that all impact curve depth points are - # represented in exceedance curve; we do not here - - depth_bins, probs = curve.get_probability_bins() - - impact_bins = np.interp(depth_bins, vul_depths, vul_impacts) - - include_bin = depth_bins < cutoff_depth - probs[include_bin[:-1]] = 0 # type: ignore - - mean = np.sum((impact_bins[1:] + impact_bins[:-1]) * probs / 2) # type: ignore - self.assertAlmostEqual(mean, 4.8453897) - - def test_protection_level(self): - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) - base_depth = np.array( - [ - 0.0, - 0.22372675, - 0.3654859, - 0.5393629, - 0.6642473, - 0.78564394, - 0.9406518, - 1.0539534, - 1.1634114, - ] - ) - # future_depth = np.array( - # [0.059601218, 0.33267087, 0.50511575, 0.71471703, 0.8641244, 1.0032823, 1.1491022, 1.1634114, 1.1634114] - # ) - - exceed_probs = 1.0 / return_periods - - protection_return_period = 250.0 # protection level of 250 years - protection_depth = np.interp( - 1.0 / protection_return_period, exceed_probs[::-1], base_depth[::-1] - ) - - 
self.assertAlmostEqual(protection_depth, 0.9406518) # type: ignore - - def test_single_asset_impact(self): - # exceedance curve - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) - exceed_probs = 1.0 / return_periods - depths = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] - ) - curve = ExceedanceCurve(exceed_probs, depths) - - cutoff_depth = 0.9406518 - curve = curve.add_value_point(cutoff_depth) - - # impact curve - vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) - vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) - - depth_bins, probs = curve.get_probability_bins() +def test_impact_curve(): + """Testing the generation of an asset when only an impact curve (e.g. damage curve is available)""" + + # exceedance curve + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + exceed_probs = 1.0 / return_periods + depths = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, + ] + ) + curve = ExceedanceCurve(exceed_probs, depths) - impact_bins = np.interp(depth_bins, vul_depths, vul_impacts) + vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) + vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) - # if upper end of bin less then cutoff then exclude - probs_w_cutoff = np.where(depth_bins[1:] <= cutoff_depth, 0.0, 1.0) - # n_bins = len(probs) # type: ignore - vul = VulnerabilityDistrib( - type(RiverineInundation), depth_bins, impact_bins, np.diag(probs_w_cutoff) - ) # np.eye(n_bins, n_bins)) - hazard_paths = ["unknown"] - event = HazardEventDistrib( - type(RiverineInundation), depth_bins, probs, hazard_paths - ) # type: ignore + cutoff_depth = 0.9406518 + curve = curve.add_value_point(cutoff_depth) - impact_prob = vul.prob_matrix.T @ event.prob - impact = 
ImpactDistrib(vul.event_type, vul.impact_bins, impact_prob, event.path) + depth_bins, probs = curve.get_probability_bins() - mean = impact.mean_impact() + impact_bins = np.interp(depth_bins, vul_depths, vul_impacts) - self.assertAlmostEqual(mean, 4.8453897) + include_bin = depth_bins < cutoff_depth + probs[include_bin[:-1]] = 0 - def test_performance_hazardlookup(self): - """Just for reference: not true test""" - asset_requests = {} - import time + mean = np.sum((impact_bins[1:] + impact_bins[:-1]) * probs / 2) + assert mean == pytest.approx(4.8453897) - start = time.time() - assets = [ - RealEstateAsset(latitude=0, longitude=0, location="", type="") - for _ in range(10000) +def test_protection_level(): + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + base_depth = np.array( + [ + 0.0, + 0.22372675, + 0.3654859, + 0.5393629, + 0.6642473, + 0.78564394, + 0.9406518, + 1.0539534, + 1.1634114, ] - - vulnerability_models = [ - RealEstateCoastalInundationModel(), - RealEstateRiverineInundationModel(), + ) + + exceed_probs = 1.0 / return_periods + + protection_return_period = 250.0 # protection level of 250 years + protection_depth = np.interp( + 1.0 / protection_return_period, exceed_probs[::-1], base_depth[::-1] + ) + + assert protection_depth == pytest.approx(0.9406518) + + +def test_single_asset_impact(): + # exceedance curve + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + exceed_probs = 1.0 / return_periods + depths = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, ] + ) + curve = ExceedanceCurve(exceed_probs, depths) + + cutoff_depth = 0.9406518 + curve = curve.add_value_point(cutoff_depth) + + vul_depths = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1]) + vul_impacts = np.array([0, 1, 2, 7, 14, 30, 60, 180, 365]) + + depth_bins, probs = curve.get_probability_bins() + + impact_bins = 
np.interp(depth_bins, vul_depths, vul_impacts) + + probs_w_cutoff = np.where(depth_bins[1:] <= cutoff_depth, 0.0, 1.0) + vul = VulnerabilityDistrib( + type(RiverineInundation), depth_bins, impact_bins, np.diag(probs_w_cutoff) + ) + hazard_paths = ["unknown"] + event = HazardEventDistrib( + type(RiverineInundation), depth_bins, probs, hazard_paths + ) + + impact_prob = vul.prob_matrix.T @ event.prob + impact = ImpactDistrib(vul.event_type, vul.impact_bins, impact_prob, event.path) + + mean = impact.mean_impact() + + assert mean == pytest.approx(4.8453897) + + +def test_performance_hazardlookup(): + asset_requests = {} + import time + + start = time.time() + + assets = [ + RealEstateAsset(latitude=0, longitude=0, location="", type="") + for _ in range(10000) + ] + vulnerability_models = [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ] + + time_assets = time.time() - start + print(f"Time for asset generation {time_assets}s ") + start = time.time() + + # create requests: + for v in vulnerability_models: + for a in assets: + asset_requests[(v, a)] = [ + HazardDataRequest( + RiverineInundation, + 0, + 0, + indicator_id="", + scenario="", + year=2030, + ) + ] + + time_requests = time.time() - start + print(f"Time for requests dictionary creation {time_requests}s ") + start = time.time() + + for key in asset_requests: + if asset_requests[key][0].longitude != 0: + raise Exception() - time_assets = time.time() - start - print(f"Time for asset generation {time_assets}s ") - start = time.time() - # we key requests via model and assets; let's check dictionary look-up is fast enough - # (there are less simple alternatives) - - # create requests: - for v in vulnerability_models: - for a in assets: - asset_requests[(v, a)] = [ - HazardDataRequest( - RiverineInundation, - 0, - 0, - indicator_id="", - scenario="", - year=2030, - ) - ] - - time_requests = time.time() - start - print(f"Time for requests dictionary creation {time_requests}s ") - start 
= time.time() - # read requests: - for key in asset_requests: - if asset_requests[key][0].longitude != 0: - raise Exception() - - time_responses = time.time() - start - print(f"Time for response dictionary creation {time_responses}s ") + time_responses = time.time() - start + print(f"Time for response dictionary creation {time_responses}s ") diff --git a/tests/kernel/chronic_asset_impact_test.py b/tests/kernel/chronic_asset_impact_test.py index 1f5e74b6..299b602c 100644 --- a/tests/kernel/chronic_asset_impact_test.py +++ b/tests/kernel/chronic_asset_impact_test.py @@ -1,7 +1,7 @@ -import unittest from typing import Iterable, List, Union import numpy as np +import pytest from scipy.stats import norm from physrisk.data.pregenerated_hazard_model import ZarrHazardModel @@ -13,6 +13,7 @@ HazardParameterDataResponse, ) from physrisk.kernel.hazards import ChronicHeat +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import calculate_impacts from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType from physrisk.kernel.vulnerability_model import ( @@ -100,13 +101,10 @@ def get_impact( Returns: Probability distribution of impacts. """ - assert isinstance(asset, IndustrialActivity) baseline_dd_above_mean, scenario_dd_above_mean = data_responses - - # check expected type; can maybe do this more nicely + assert isinstance(asset, IndustrialActivity) assert isinstance(baseline_dd_above_mean, HazardParameterDataResponse) assert isinstance(scenario_dd_above_mean, HazardParameterDataResponse) - # Ensuring that the values are greater than zero. Should be by defition. 
assert scenario_dd_above_mean.parameter >= 0 assert baseline_dd_above_mean.parameter >= 0 @@ -119,11 +117,11 @@ def get_impact( ) hours_worked = self.total_labour_hours fraction_loss_mean = ( - delta_dd_above_mean * self.time_lost_per_degree_day - ) / hours_worked + delta_dd_above_mean * self.time_lost_per_degree_day / hours_worked + ) fraction_loss_std = ( - delta_dd_above_mean * self.time_lost_per_degree_day_se - ) / hours_worked + delta_dd_above_mean * self.time_lost_per_degree_day_se / hours_worked + ) return get_impact_distrib( fraction_loss_mean, @@ -184,70 +182,69 @@ def get_impact_distrib( return ImpactDistrib(hazard_type, impact_bins, probs, hazard_paths, impact_type) -class TestChronicAssetImpact(unittest.TestCase): - """Tests the impact on an asset of a chronic hazard model.""" +@pytest.fixture +def setup_hazard_model(): + store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + return hazard_model - def test_chronic_vulnerability_model(self): - """Testing the generation of an asset when only an impact curve (e.g. damage curve is available)""" - store = mock_hazard_model_store_heat(TestData.longitudes, TestData.latitudes) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) - # to run a live calculation, we omit the store parameter +def test_chronic_vulnerability_model(setup_hazard_model): + """Testing the generation of an asset when only an impact curve (e.g. 
damage curve is available)""" - scenario = "ssp585" - year = 2050 + hazard_model = setup_hazard_model + scenario = "ssp585" + year = 2050 - vulnerability_models = DictBasedVulnerabilityModels( - {IndustrialActivity: [ChronicHeatGZNModel()]} - ) + vulnerability_models = DictBasedVulnerabilityModels( + {IndustrialActivity: [ChronicHeatGZNModel()]} + ) - assets = [ - IndustrialActivity(lat, lon, type="Construction") - for lon, lat in zip(TestData.longitudes, TestData.latitudes) - ][:1] + assets = [ + IndustrialActivity(lat, lon, type="Construction") + for lon, lat in zip(TestData.longitudes, TestData.latitudes) + ][:1] - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) - value_test = list(results.values())[0][0].impact.mean_impact() - value_test = list(results.values())[0][0].impact.prob - value_exp = np.array( - [ - 0.02656777935, - 0.01152965908, - 0.01531928095, - 0.01983722513, - 0.02503479879, - 0.03079129430, - 0.03690901485, - 0.04311790414, - 0.04909118572, - 0.05447159590, - 0.51810304973, - 0.16109092806, - 0.00807680527, - 0.00005941883, - 0.00000005990, - 0.00000000001, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - 0.00000000000, - ] - ) - value_diff = np.sum(np.abs(value_test - value_exp)) - self.assertAlmostEqual(value_diff, 0.0, places=8) + value_test = list(results.values())[0][0].impact.mean_impact() + value_test = list(results.values())[0][0].impact.prob + value_exp = np.array( + [ + 0.02656777935, + 0.01152965908, + 0.01531928095, + 0.01983722513, + 0.02503479879, + 0.03079129430, + 0.03690901485, + 0.04311790414, + 0.04909118572, + 0.05447159590, + 0.51810304973, + 0.16109092806, + 
0.00807680527, + 0.00005941883, + 0.00000005990, + 0.00000000001, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + ] + ) + value_diff = np.sum(np.abs(value_test - value_exp)) + assert np.isclose(value_diff, 0.0, atol=1.0e-7) diff --git a/tests/kernel/curves_test.py b/tests/kernel/curves_test.py index bf488772..ee6d01d7 100644 --- a/tests/kernel/curves_test.py +++ b/tests/kernel/curves_test.py @@ -1,26 +1,19 @@ """Test asset impact calculations.""" -import unittest - import numpy as np +import pytest from physrisk.kernel.curve import ExceedanceCurve -class TestAssetImpact(unittest.TestCase): - """Tests asset impact calculations.""" - - def test_return_period_data(self): - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) - depths = np.array([0.059, 0.33, 0.51, 0.71, 0.86, 1.00, 1.15, 1.16, 1.16]) - - # say we need to add an extra depth point because the damage below that point is zero - # extra_depth = 0.75 +@pytest.fixture +def curve_data(): + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + depths = np.array([0.059, 0.33, 0.51, 0.71, 0.86, 1.00, 1.15, 1.16, 1.16]) + exceed_probs = 1.0 / return_periods + return ExceedanceCurve(exceed_probs, depths) - exceed_probs = 1.0 / return_periods - curve = ExceedanceCurve(exceed_probs, depths) - curve = curve.add_value_point(0.75) - self.assertAlmostEqual(curve.probs[4], 0.03466667) +def test_return_period_data(curve_data): + curve = curve_data.add_value_point(0.75) + assert pytest.approx(curve.probs[4], rel=1e-6) == 0.03466667 diff --git a/tests/kernel/exposure_test.py b/tests/kernel/exposure_test.py index 72dad183..4a817018 100644 --- a/tests/kernel/exposure_test.py +++ b/tests/kernel/exposure_test.py @@ -1,13 +1,14 @@ -import json - import 
fsspec.implementations.local as local +import json import numpy as np +import pytest import physrisk.api.v1.common from physrisk.api.v1.exposure_req_resp import ( AssetExposureRequest, AssetExposureResponse, ) +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.container import ZarrHazardModelFactory from physrisk.data.inventory import EmbeddedInventory from physrisk.data.inventory_reader import InventoryReader @@ -30,98 +31,98 @@ ) from physrisk.requests import Requester -from ..base_test import TestWithCredentials from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_path_curves -class TestExposureMeasures(TestWithCredentials): - def test_jupiter_exposure_service(self): - assets, store, hazard_model_factory, expected = self._get_components() - inventory = EmbeddedInventory() - requester = Requester( - hazard_model_factory=hazard_model_factory, - vulnerability_models_factory=None, - inventory=inventory, - inventory_reader=InventoryReader(fs=local.LocalFileSystem(), base_path=""), - reader=ZarrReader(store=store), - colormaps=inventory.colormaps(), - measures_factory=DefaultMeasuresFactory, - ) - assets_api = physrisk.api.v1.common.Assets( - items=[ - physrisk.api.v1.common.Asset( - asset_class="Asset", latitude=a.latitude, longitude=a.longitude - ) - for a in assets[0:1] - ] +@pytest.fixture +def get_components(): + # "precipitation/jupiter/v1/max_daily_water_equivalent_{scenario}_{year}" + paths = [ + "combined_flood/jupiter/v1/fraction_{scenario}_{year}", + "chronic_heat/jupiter/v1/days_above_35c_{scenario}_{year}", + "wind/jupiter/v1/max_1min_{scenario}_{year}", + "drought/jupiter/v1/months_spei3m_below_-2_{scenario}_{year}", + "hail/jupiter/v1/days_above_5cm_{scenario}_{year}", + "fire/jupiter/v1/fire_probability_{scenario}_{year}", + ] + + all_resources = EmbeddedInventory().resources + resources = [all_resources[p] for p in paths] + + values = [np.array([v]) for v in [0.02, 15, 100, 0.7, 0.1, 
0.9]] + + expected = { + CombinedInundation: Category.LOW, + ChronicHeat: Category.MEDIUM, + Wind: Category.MEDIUM, + Drought: Category.HIGH, + Hail: Category.LOWEST, + Fire: Category.HIGHEST, + } + + def path_curves(): + return dict( + (r.path.format(scenario="ssp585", year=2030), v) + for (r, v) in zip(resources, values) ) - request = AssetExposureRequest(assets=assets_api, scenario="ssp585", year=2050) - response = requester.get( - request_id="get_asset_exposure", request_dict=request.model_dump() - ) - result = AssetExposureResponse(**json.loads(response)).items[0] - expected = dict((k.__name__, v) for (k, v) in expected.items()) - for key in result.exposures.keys(): - assert result.exposures[key].category == expected[key].name - - def test_jupiter_exposure(self): - assets, _, hazard_model_factory, expected = self._get_components() - asset = assets[0] - measure = JupterExposureMeasure() - - results = calculate_exposures( - [asset], - hazard_model_factory.hazard_model(), - measure, - scenario="ssp585", - year=2030, - ) - categories = results[asset].hazard_categories - for k, v in expected.items(): - assert categories[k][0] == v - - def _get_components(self): - # "precipitation/jupiter/v1/max_daily_water_equivalent_{scenario}_{year}" - paths = [ - "combined_flood/jupiter/v1/fraction_{scenario}_{year}", - "chronic_heat/jupiter/v1/days_above_35c_{scenario}_{year}", - "wind/jupiter/v1/max_1min_{scenario}_{year}", - "drought/jupiter/v1/months_spei3m_below_-2_{scenario}_{year}", - "hail/jupiter/v1/days_above_5cm_{scenario}_{year}", - "fire/jupiter/v1/fire_probability_{scenario}_{year}", - ] - - all_resources = EmbeddedInventory().resources - resources = [all_resources[p] for p in paths] - values = [np.array([v]) for v in [0.02, 15, 100, 0.7, 0.1, 0.9]] - - expected = { - CombinedInundation: Category.LOW, - ChronicHeat: Category.MEDIUM, - Wind: Category.MEDIUM, - Drought: Category.HIGH, - Hail: Category.LOWEST, - Fire: Category.HIGHEST, - } - - def path_curves(): - 
return dict( - (r.path.format(scenario="ssp585", year=2030), v) - for (r, v) in zip(resources, values) + assets = [ + Asset(lat, lon) for (lat, lon) in zip(TestData.latitudes, TestData.longitudes) + ] + + store = mock_hazard_model_store_path_curves( + TestData.longitudes, TestData.latitudes, path_curves() + ) + + hazard_model_factory = ZarrHazardModelFactory( + source_paths=get_default_source_paths(EmbeddedInventory()), store=store + ) + + return assets, store, hazard_model_factory, expected + + +def test_jupiter_exposure_service(get_components): + assets, store, hazard_model_factory, expected = get_components + inventory = EmbeddedInventory() + requester = Requester( + hazard_model_factory=hazard_model_factory, + vulnerability_models_factory=None, + inventory=inventory, + inventory_reader=InventoryReader(fs=local.LocalFileSystem(), base_path=""), + reader=ZarrReader(store=store), + colormaps=inventory.colormaps(), + measures_factory=DefaultMeasuresFactory, + ) + assets_api = physrisk.api.v1.common.Assets( + items=[ + physrisk.api.v1.common.Asset( + asset_class="Asset", latitude=a.latitude, longitude=a.longitude ) - - assets = [ - Asset(lat, lon) - for (lat, lon) in zip(TestData.latitudes, TestData.longitudes) + for a in assets[0:1] ] - - store = mock_hazard_model_store_path_curves( - TestData.longitudes, TestData.latitudes, path_curves() - ) - - hazard_model_factory = ZarrHazardModelFactory( - source_paths=get_default_source_paths(EmbeddedInventory()), store=store - ) - - return assets, store, hazard_model_factory, expected + ) + request = AssetExposureRequest(assets=assets_api, scenario="ssp585", year=2050) + response = requester.get( + request_id="get_asset_exposure", request_dict=request.model_dump() + ) + result = AssetExposureResponse(**json.loads(response)).items[0] + expected = dict((k.__name__, v) for (k, v) in expected.items()) + for key in result.exposures.keys(): + assert result.exposures[key].category == expected[key].name + + +def 
test_jupiter_exposure(get_components): + assets, _, hazard_model_factory, expected = get_components + asset = assets[0] + measure = JupterExposureMeasure() + + results = calculate_exposures( + [asset], + hazard_model_factory.hazard_model(), + measure, + scenario="ssp585", + year=2030, + ) + categories = results[asset].hazard_categories + for k, v in expected.items(): + assert categories[k][0] == v diff --git a/tests/kernel/financial_model_test.py b/tests/kernel/financial_model_test.py index e06a3a49..601af25c 100644 --- a/tests/kernel/financial_model_test.py +++ b/tests/kernel/financial_model_test.py @@ -1,7 +1,7 @@ -import unittest from datetime import datetime import numpy as np +import pytest from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.hazard_models.core_hazards import get_default_source_paths @@ -22,56 +22,53 @@ def get_asset_aggregate_cashflows( return 1000 -class TestAssetImpact(unittest.TestCase): - """Tests asset impact calculations.""" - - def test_financial_model(self): - curve = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - - # we need to define - # 1) The hazard models - # 2) The vulnerability models - # 3) The financial models - - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) +@pytest.fixture +def financial_model_fixture(): + curve = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, + ] + ) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) - model = LossModel(hazard_model=hazard_model) + # we need to define + # 1) The hazard models + # 2) The vulnerability models + # 3) The financial models + hazard_model = 
ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + model = LossModel(hazard_model=hazard_model) + data_provider = MockFinancialDataProvider() + financial_model = FinancialModel(data_provider) + return model, financial_model - data_provider = MockFinancialDataProvider() - financial_model = FinancialModel(data_provider) - assets = [ - PowerGeneratingAsset(lat, lon) - for lon, lat in zip(TestData.longitudes, TestData.latitudes) - ] - measures = model.get_financial_impacts( - assets, - financial_model=financial_model, - scenario="ssp585", - year=2080, - sims=100000, - ) +def test_financial_model(financial_model_fixture): + model, financial_model = financial_model_fixture + assets = [ + PowerGeneratingAsset(lat, lon) + for lon, lat in zip(TestData.longitudes, TestData.latitudes) + ] + measures = model.get_financial_impacts( + assets, + financial_model=financial_model, + scenario="ssp585", + year=2080, + sims=100000, + ) - np.testing.assert_array_almost_equal_nulp( - measures["RiverineInundation"]["percentile_values"], - np.array( - [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 1000.0, 1000.0, 2000.0] - ), - ) + np.testing.assert_array_almost_equal_nulp( + measures["RiverineInundation"]["percentile_values"], + np.array( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.0, 1000.0, 1000.0, 2000.0] + ), + ) diff --git a/tests/kernel/hazard_models_test.py b/tests/kernel/hazard_models_test.py index 7950b9a1..69abe725 100644 --- a/tests/kernel/hazard_models_test.py +++ b/tests/kernel/hazard_models_test.py @@ -3,7 +3,6 @@ import numpy as np -import tests.data.hazard_model_store_test as hms from physrisk.kernel.assets import RealEstateAsset from physrisk.kernel.hazard_model import ( HazardDataRequest, @@ -13,10 +12,13 @@ HazardParameterDataResponse, ) from physrisk.kernel.hazards import ChronicHeat, Wind +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import calculate_impacts from 
physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels from physrisk.vulnerability_models.real_estate_models import GenericTropicalCycloneModel +from ..data.hazard_model_store_test import TestData + @dataclass class SinglePointData: @@ -90,15 +92,15 @@ def test_using_point_based_hazard_model(): year = 2080 assets = [ RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") - for lon, lat in zip(hms.TestData.longitudes[0:1], hms.TestData.latitudes[0:1]) + for lon, lat in zip(TestData.longitudes[0:1], TestData.latitudes[0:1]) ] # fmt: off wind_return_periods = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0]) # noqa wind_intensities = np.array([37.279999, 44.756248, 48.712502, 51.685001, 53.520000, 55.230000, 56.302502, 57.336250, 58.452499, 59.283749, 63.312500, 65.482498, 66.352501, 67.220001, 67.767502, 68.117500, 68.372498, 69.127502, 70.897499 ]) # noqa # fmt: on point = SinglePointData( - hms.TestData.latitudes[0], - hms.TestData.longitudes[0], + TestData.latitudes[0], + TestData.longitudes[0], scenario=scenario, year=year, wind_return_periods=wind_return_periods, diff --git a/tests/kernel/image_creation_test.py b/tests/kernel/image_creation_test.py index 975bea9c..481ffedb 100644 --- a/tests/kernel/image_creation_test.py +++ b/tests/kernel/image_creation_test.py @@ -1,7 +1,6 @@ import os -import unittest - import numpy as np +import pytest import zarr import zarr.storage @@ -9,50 +8,45 @@ from physrisk.data.image_creator import ImageCreator from physrisk.data.zarr_reader import ZarrReader -from ..base_test import TestWithCredentials - - -class TestImageCreation(TestWithCredentials): - def test_image_creation(self): - path = "test_array" - store = zarr.storage.MemoryStore(root="hazard.zarr") - root = zarr.open(store=store, mode="w") - - im = np.array([[1.2, 0.8], [0.5, 0.4]]) - z = root.create_dataset( # type: ignore - path, - shape=(1, 
im.shape[0], im.shape[1]), - chunks=(1, im.shape[0], im.shape[1]), - dtype="f4", - ) - z[0, :, :] = im - converter = ImageCreator(reader=ZarrReader(store)) - colormap = colormap_provider.colormap("test") - - def get_colors(index: int): - return colormap[str(index)] - - result = converter._to_rgba(im, get_colors) - # Max should be 255, min should be 1. Other values span the 253 elements from 2 to 254. - expected = np.array( - [ - [255, 2 + (0.8 - 0.4) * 253 / (1.2 - 0.4)], - [2 + (0.5 - 0.4) * 253 / (1.2 - 0.4), 1], - ] - ) - converter.convert( - path, colormap="test" - ) # check no error running through mocked example. - np.testing.assert_equal(result, expected.astype(np.uint8)) - - @unittest.skip("just example") - def test_write_file(self): - # show how to create image from zarr array - # useful for testing image generation - test_output_dir = "{set me}" - test_path = "wildfire/jupiter/v1/wildfire_probability_ssp585_2050_map" - store = zarr.DirectoryStore( - os.path.join(test_output_dir, "hazard_test", "hazard.zarr") - ) - creator = ImageCreator(ZarrReader(store)) - creator.to_file(os.path.join(test_output_dir, "test.png"), test_path) + +def test_image_creation(): + path = "test_array" + store = zarr.storage.MemoryStore(root="hazard.zarr") + root = zarr.open(store=store, mode="w") + + im = np.array([[1.2, 0.8], [0.5, 0.4]]) + z = root.create_dataset( # type: ignore + path, + shape=(1, im.shape[0], im.shape[1]), + chunks=(1, im.shape[0], im.shape[1]), + dtype="f4", + ) + z[0, :, :] = im + converter = ImageCreator(reader=ZarrReader(store)) + colormap = colormap_provider.colormap("test") + + def get_colors(index: int): + return colormap[str(index)] + + result = converter._to_rgba(im, get_colors) + # Max should be 255, min should be 1. Other values span the 253 elements from 2 to 254. 
+ expected = np.array( + [ + [255, 2 + (0.8 - 0.4) * 253 / (1.2 - 0.4)], + [2 + (0.5 - 0.4) * 253 / (1.2 - 0.4), 1], + ] + ) + converter.convert( + path, colormap="test" + ) # check no error running through mocked example. + np.testing.assert_equal(result, expected.astype(np.uint8)) + + +@pytest.mark.skip(reason="just example") +def test_write_file(test_dir): + # show how to create image from zarr array + # useful for testing image generation + test_path = "wildfire/jupiter/v1/wildfire_probability_ssp585_2050_map" + store = zarr.DirectoryStore(os.path.join(test_dir, "hazard_test", "hazard.zarr")) + creator = ImageCreator(ZarrReader(store)) + creator.to_file(os.path.join(test_dir, "test.png"), test_path) diff --git a/tests/models/example_models_test.py b/tests/models/example_models_test.py index 3026865c..eb492b3e 100644 --- a/tests/models/example_models_test.py +++ b/tests/models/example_models_test.py @@ -1,5 +1,3 @@ -import unittest - import numpy as np from scipy import stats @@ -8,6 +6,7 @@ from physrisk.kernel.assets import Asset, RealEstateAsset from physrisk.kernel.hazard_model import HazardEventDataResponse from physrisk.kernel.hazards import Inundation, RiverineInundation +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import calculate_impacts from physrisk.kernel.impact_distrib import ImpactType from physrisk.kernel.vulnerability_matrix_provider import VulnMatrixProvider @@ -18,10 +17,7 @@ from physrisk.vulnerability_models.example_models import ( ExampleCdfBasedVulnerabilityModel, ) -from tests.data.hazard_model_store_test import ( - TestData, - mock_hazard_model_store_inundation, -) +from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_inundation class ExampleRealEstateInundationModel(VulnerabilityModel): @@ -75,76 +71,87 @@ def beta_distrib(mean, std): return lambda x, a=a, b=b: stats.beta.cdf(x, a, b) -class TestExampleModels(unittest.TestCase): - def 
test_pdf_based_vulnerability_model(self): - model = ExampleCdfBasedVulnerabilityModel( - indicator_id="", hazard_type=Inundation - ) - - latitude, longitude = 45.268405, 19.885738 - asset = Asset(latitude, longitude) - - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) - intensities = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] - ) - - mock_response = HazardEventDataResponse(return_periods, intensities) - - vul, event = model.get_distributions(asset, [mock_response]) - - def test_user_supplied_model(self): - curve = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) - - scenario = "rcp8p5" - year = 2080 - - vulnerability_models = DictBasedVulnerabilityModels( - {RealEstateAsset: [ExampleRealEstateInundationModel()]} - ) - - assets = [ - RealEstateAsset(lat, lon, location="Asia", type="Building/Industrial") - for lon, lat in zip(TestData.longitudes, TestData.latitudes) +def test_cdf_based_vulnerability_model(): + model = ExampleCdfBasedVulnerabilityModel(indicator_id="", hazard_type=Inundation) + + latitude, longitude = 45.268405, 19.885738 + asset = Asset(latitude, longitude) + + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + intensities = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, ] - - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) - - self.assertAlmostEqual( - results[assets[0], RiverineInundation, scenario, year][0] - .impact.to_exceedance_curve() - 
.probs[0], - 0.499, - ) + ) + + mock_response = HazardEventDataResponse(return_periods, intensities) + + vul, event = model.get_distributions(asset, [mock_response]) + + value_exp = np.array( + [ + 0.05960122, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, + ] + ) + + value_diff = np.sum(np.abs(vul.intensity_bins - value_exp)) + assert np.isclose(value_diff, 0.0, atol=1.0e-7) + + +def test_user_supplied_model(): + curve = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, + ] + ) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + + scenario = "rcp8p5" + year = 2080 + + vulnerability_models = DictBasedVulnerabilityModels( + {RealEstateAsset: [ExampleRealEstateInundationModel()]} + ) + + assets = [ + RealEstateAsset(lat, lon, location="Asia", type="Building/Industrial") + for lon, lat in zip(TestData.longitudes, TestData.latitudes) + ] + + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) + + assert np.isclose( + results[assets[0], RiverineInundation, scenario, year][0] + .impact.to_exceedance_curve() + .probs[0], + 0.499, + ) diff --git a/tests/models/power_generating_asset_models_test.py b/tests/models/power_generating_asset_models_test.py index 5478686c..392d40c2 100644 --- a/tests/models/power_generating_asset_models_test.py +++ b/tests/models/power_generating_asset_models_test.py @@ -1,16 +1,22 @@ """Test asset impact calculations.""" import os -import unittest from typing import List import numpy as np +import pandas as pd +import pytest import physrisk.api.v1.common import physrisk.data.static.world as wd -from physrisk.kernel import Asset, PowerGeneratingAsset, calculation +from physrisk.data.pregenerated_hazard_model import 
ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel import calculation from physrisk.kernel.assets import ( + Asset, IndustrialActivity, + PowerGeneratingAsset, RealEstateAsset, ThermalPowerGeneratingAsset, ) @@ -18,244 +24,812 @@ from physrisk.kernel.impact import calculate_impacts from physrisk.kernel.impact_distrib import EmptyImpactDistrib from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels -from physrisk.utils.lazy import lazy_import from physrisk.vulnerability_models.power_generating_asset_models import InundationModel -from tests.base_test import TestWithCredentials +from physrisk.vulnerability_models.thermal_power_generation_models import ( + ThermalPowerGenerationAirTemperatureModel, + ThermalPowerGenerationAqueductWaterRiskModel, + ThermalPowerGenerationCoastalInundationModel, + ThermalPowerGenerationCoastalInundationTUDelftModel, + ThermalPowerGenerationDroughtModel, + ThermalPowerGenerationHighFireModel, + ThermalPowerGenerationLandslideModel, + ThermalPowerGenerationRiverineInundationModel, + ThermalPowerGenerationRiverineInundationTUDelftModel, + ThermalPowerGenerationSevereConvectiveWindstormModel, + ThermalPowerGenerationSubsidenceModel, + ThermalPowerGenerationWaterStressModel, + ThermalPowerGenerationWaterTemperatureModel, +) -pd = lazy_import("pandas") +@pytest.fixture +def setup_data(wri_power_plant_assets): + """Fixture to set up the test data.""" + asset_list = wri_power_plant_assets -class TestPowerGeneratingAssetModels(TestWithCredentials): - """Tests World Resource Institute (WRI) models for power generating assets.""" + filtered = asset_list.loc[ + asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"]) + ] + filtered = filtered[filtered["latitude"] > -60] - def test_inundation(self): - # exceedance curve - return_periods = np.array( - [2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0] - ) 
- base_depth = np.array( - [ - 0.0, - 0.22372675, - 0.3654859, - 0.5393629, - 0.6642473, - 0.78564394, - 0.9406518, - 1.0539534, - 1.1634114, - ] + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + primary_fuels = np.array( + [ + primary_fuel.replace(" and ", "And").replace(" ", "") + for primary_fuel in filtered["primary_fuel"] + ] + ) + capacities = np.array(filtered["capacity_mw"]) + asset_names = np.array(filtered["name"]) + + countries, continents = wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + assets = [ + ThermalPowerGeneratingAsset( + latitude, + longitude, + type=primary_fuel, + location=continent, + capacity=capacity, ) - future_depth = np.array( - [ - 0.059601218, - 0.33267087, - 0.50511575, - 0.71471703, - 0.8641244, - 1.0032823, - 1.1491022, - 1.1634114, - 1.1634114, - ] + for latitude, longitude, capacity, primary_fuel, continent in zip( + latitudes, + longitudes, + capacities, + primary_fuels, + continents, ) + ] + + for i, asset_name in enumerate(asset_names): + assets[i].__dict__.update({"asset_name": asset_name}) - # we mock the response of the data request - responses_mock = [ - HazardEventDataResponse(return_periods, base_depth), - HazardEventDataResponse(return_periods, future_depth), + return assets + + +@pytest.fixture +def setup_assets_extra(wri_power_plant_assets): + asset_list = wri_power_plant_assets + filtered = asset_list.loc[ + asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"]) + ] + filtered = filtered[-60 < filtered["latitude"]] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + primary_fuels = np.array( + [ + primary_fuel.replace(" and ", "And").replace(" ", "") + for primary_fuel in filtered["primary_fuel"] ] + ) + capacities = np.array(filtered["capacity_mw"]) + + countries, continents = wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + assets = [ + 
ThermalPowerGeneratingAsset( + latitude, + longitude, + type=primary_fuel, + location=country, + capacity=capacity, + ) + for latitude, longitude, capacity, primary_fuel, country in zip( + latitudes, + longitudes, + capacities, + primary_fuels, + countries, + ) + if country in ["Spain"] + ] + + return assets - latitude, longitude = 45.268405, 19.885738 - assets = [Asset(latitude, longitude)] - model = InundationModel(assets) - impact, _, _ = model.get_impact_details(assets[0], responses_mock) - mean = impact.mean_impact() +@pytest.fixture +def hazard_indicator_dict(): + """Fixture for hazard indicator dictionary.""" + return { + "AirTemperature": "days_tas_above", + "CoastalInundation": "flood_depth", + "RiverineInundation": "flood_depth", + "Drought": "months_spei3m_below_minus2", + "WaterStress": "water_stress_and_water_supply", + "WaterTemperature": "weeks_water_temp_above", + } - self.assertAlmostEqual(mean, 4.8453897 / 365.0) - @unittest.skip("example, not test") - def test_create_synthetic_portfolios_and_test(self): - # cache_folder = r"" +@pytest.fixture +def vulnerability_models_dict(): + """Fixture for vulnerability models dictionary.""" + return { + "historical_1980": [ + ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationCoastalInundationModel(), + ], + "historical_2005": [ThermalPowerGenerationAirTemperatureModel()], + "ssp126_2030": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp126_2040": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp126_2050": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp126_2060": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp126_2070": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp126_2080": [ + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp126_2090": 
[ThermalPowerGenerationWaterTemperatureModel()], + "ssp245_2030": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + ThermalPowerGenerationRiverineInundationModel(), + ], + "ssp245_2040": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp245_2050": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationCoastalInundationModel(), + ], + "ssp245_2060": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp245_2070": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp245_2080": [ + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + ThermalPowerGenerationRiverineInundationModel(), + ], + "ssp245_2090": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp585_2005": [ThermalPowerGenerationDroughtModel()], + "ssp585_2030": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + ThermalPowerGenerationRiverineInundationModel(), + ], + "ssp585_2040": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ], + "ssp585_2050": [ + ThermalPowerGenerationAirTemperatureModel(), + ThermalPowerGenerationDroughtModel(), + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + ThermalPowerGenerationRiverineInundationModel(), + ], + "ssp585_2060": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp585_2070": [ThermalPowerGenerationWaterTemperatureModel()], + "ssp585_2080": [ + ThermalPowerGenerationWaterTemperatureModel(), + ThermalPowerGenerationCoastalInundationModel(), + 
ThermalPowerGenerationRiverineInundationModel(), + ThermalPowerGenerationDroughtModel(), + ], + "ssp585_2090": [ThermalPowerGenerationWaterTemperatureModel()], + } - cache_folder = r"/users/joemoorhouse/code/data" - asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) - # types = asset_list["primary_fuel"].unique() - # interesting = [3, 8, 13, 14, 22, 25, 27, 28, 33, 40, 51, 64, 65, 66, 71, 72, 80, 88, 92, 109] +@pytest.fixture +def vulnerability_models_dict_water_stress(): + """Fixture for vulnerability models dictionary.""" + return { + "historical_1999": [ThermalPowerGenerationWaterStressModel()], + "ssp126_2030": [ + ThermalPowerGenerationWaterStressModel(), + ], + "ssp126_2050": [ + ThermalPowerGenerationWaterStressModel(), + ], + "ssp126_2080": [ + ThermalPowerGenerationWaterStressModel(), + ], + "ssp370_2030": [ThermalPowerGenerationWaterStressModel()], + "ssp370_2050": [ThermalPowerGenerationWaterStressModel()], + "ssp370_2080": [ThermalPowerGenerationWaterStressModel()], + "ssp585_2030": [ + ThermalPowerGenerationWaterStressModel(), + ], + "ssp585_2050": [ + ThermalPowerGenerationWaterStressModel(), + ], + "ssp585_2080": [ + ThermalPowerGenerationWaterStressModel(), + ], + } - filtered = asset_list[0:1000] - longitudes = np.array(filtered["longitude"]) - latitudes = np.array(filtered["latitude"]) - primary_fuel = np.array(filtered["primary_fuel"]) - generation = np.array(filtered["estimated_generation_gwh"]) +@pytest.fixture +def vul_models_dict_extra(): + return { + "historical_1971": [ + ThermalPowerGenerationHighFireModel(), + ThermalPowerGenerationSevereConvectiveWindstormModel(), + ThermalPowerGenerationCoastalInundationTUDelftModel(), + ThermalPowerGenerationRiverineInundationTUDelftModel(), + ], + "historical_1980": [ + ThermalPowerGenerationLandslideModel(), + ThermalPowerGenerationSubsidenceModel(), + ], + "ssp126_2030": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp126_2050": 
[ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp126_2080": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp370_2030": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp370_2050": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp370_2080": [ThermalPowerGenerationAqueductWaterRiskModel()], + "rcp45_2050": [ + ThermalPowerGenerationHighFireModel(), + ThermalPowerGenerationSevereConvectiveWindstormModel(), + ThermalPowerGenerationCoastalInundationTUDelftModel(), + ThermalPowerGenerationRiverineInundationTUDelftModel(), + ], + "rcp45_2070": [ + ThermalPowerGenerationCoastalInundationTUDelftModel(), + ThermalPowerGenerationRiverineInundationTUDelftModel(), + ], + "rcp45_2100": [ + ThermalPowerGenerationHighFireModel(), + ThermalPowerGenerationSevereConvectiveWindstormModel(), + ], + "ssp585_2030": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp585_2050": [ThermalPowerGenerationAqueductWaterRiskModel()], + "ssp585_2080": [ThermalPowerGenerationAqueductWaterRiskModel()], + "rcp85_2050": [ + ThermalPowerGenerationHighFireModel(), + ThermalPowerGenerationSevereConvectiveWindstormModel(), + ThermalPowerGenerationCoastalInundationTUDelftModel(), + ThermalPowerGenerationRiverineInundationTUDelftModel(), + ], + "rcp85_2070": [ + ThermalPowerGenerationCoastalInundationTUDelftModel(), + ThermalPowerGenerationRiverineInundationTUDelftModel(), + ], + "rcp85_2100": [ + ThermalPowerGenerationHighFireModel(), + ThermalPowerGenerationSevereConvectiveWindstormModel(), + ], + } - _, continents = wd.get_countries_and_continents( - latitudes=latitudes, longitudes=longitudes + +def api_assets(assets: List[Asset]): + items = [ + physrisk.api.v1.common.Asset( + asset_class=type(a).__name__, + type=getattr(a, "type", None), + location=getattr(a, "location", None), + latitude=a.latitude, + longitude=a.longitude, ) + for a in assets + ] + return physrisk.api.v1.common.Assets(items=items) - # Power generating assets that are of interest - assets = [ - 
PowerGeneratingAsset( - lat, lon, generation=gen, location=continent, type=prim_fuel - ) - for lon, lat, gen, prim_fuel, continent in zip( - longitudes, latitudes, generation, primary_fuel, continents - ) + +def test_inundation(): + # exceedance curve + return_periods = np.array([2.0, 5.0, 10.0, 25.0, 50.0, 100.0, 250.0, 500.0, 1000.0]) + base_depth = np.array( + [ + 0.0, + 0.22372675, + 0.3654859, + 0.5393629, + 0.6642473, + 0.78564394, + 0.9406518, + 1.0539534, + 1.1634114, ] - detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) - keys = list(detailed_results.keys()) - # detailed_results[keys[0]].impact.to_exceedance_curve() - means = np.array([detailed_results[key].impact.mean_impact() for key in keys]) - interesting = [k for (k, m) in zip(keys, means) if m > 0] - assets_out = self.api_assets(item[0] for item in interesting[0:10]) - with open( - os.path.join(cache_folder, "assets_example_power_generating_small.json"), - "w", - ) as f: - f.write(assets_out.model_dump_json(indent=4)) - - # Synthetic portfolio; industrial activity at different locations - assets = [ - IndustrialActivity(lat, lon, type="Construction", location=continent) - for lon, lat, continent in zip(longitudes, latitudes, continents) + ) + future_depth = np.array( + [ + 0.059601218, + 0.33267087, + 0.50511575, + 0.71471703, + 0.8641244, + 1.0032823, + 1.1491022, + 1.1634114, + 1.1634114, ] - assets = [assets[i] for i in [0, 100, 200, 300, 400, 500, 600, 700, 800, 900]] - detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) - keys = list(detailed_results.keys()) - means = np.array( - [ - detailed_results[key][0].impact.mean_impact() - for key in detailed_results.keys() - ] + ) + + # Mock the response of the data request + responses_mock = [ + HazardEventDataResponse(return_periods, base_depth), + HazardEventDataResponse(return_periods, future_depth), + ] + + latitude, longitude = 45.268405, 19.885738 + assets = [Asset(latitude, longitude)] + model = 
InundationModel(assets) + + impact, _, _ = model.get_impact_details(assets[0], responses_mock) + mean = impact.mean_impact() + + assert np.isclose(mean, 4.8453897 / 365.0) + + +def test_create_synthetic_portfolios_and_test( + wri_power_plant_assets, test_dir, load_credentials +): + asset_list = wri_power_plant_assets + filtered = asset_list[0:1000] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + primary_fuel = np.array(filtered["primary_fuel"]) + generation = np.array(filtered["estimated_generation_gwh_2017"]) + + _, continents = wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + hazard_model = calculation.get_default_hazard_model() + vulnerability_models = DictBasedVulnerabilityModels( + calculation.get_default_vulnerability_models() + ) + + # Power generating assets that are of interest + assets = [ + PowerGeneratingAsset( + lat, lon, generation=gen, location=continent, type=prim_fuel ) - interesting = [k for (k, m) in zip(keys, means) if m > 0] - assets_out = self.api_assets(item[0] for item in interesting[0:10]) - with open( - os.path.join(cache_folder, "assets_example_industrial_activity_small.json"), - "w", - ) as f: - f.write(assets_out.model_dump_json(indent=4)) - - # Synthetic portfolio; real estate assets at different locations - assets = [ - RealEstateAsset(lat, lon, location=continent, type="Buildings/Industrial") - for lon, lat, continent in zip(longitudes, latitudes, continents) - if isinstance(continent, str) and continent != "Oceania" - ] - detailed_results = calculate_impacts(assets, scenario="ssp585", year=2030) - keys = list(detailed_results.keys()) - means = np.array( - [ - detailed_results[key][0].impact.mean_impact() - for key in detailed_results.keys() - ] + for lon, lat, gen, prim_fuel, continent in zip( + longitudes, latitudes, generation, primary_fuel, continents ) - interesting = [k for (k, m) in zip(keys, means) if m > 0] - assets_out = 
self.api_assets(item[0] for item in interesting[0:10]) - with open( - os.path.join(cache_folder, "assets_example_real_estate_small.json"), "w" - ) as f: - f.write(assets_out.model_dump_json(indent=4)) - self.assertAlmostEqual(1, 1) - - @unittest.skip("example, not test") - def test_thermal_power_generation_portfolio(self): - cache_folder = os.environ.get("CREDENTIAL_DOTENV_DIR", os.getcwd()) - - asset_list = pd.read_csv(os.path.join(cache_folder, "wri-all.csv")) - filtered = asset_list.loc[ - asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"]) + ] + detailed_results = calculate_impacts( + assets, + hazard_model=hazard_model, + vulnerability_models=vulnerability_models, + scenario="ssp585", + year=2030, + ) + keys = list(detailed_results.keys()) + means = np.array([detailed_results[key][0].impact.mean_impact() for key in keys]) + interesting = [k for (k, m) in zip(keys, means) if m > 0] + assets_out = api_assets(item[0] for item in interesting[0:10]) + with open( + os.path.join(test_dir, "assets_example_power_generating_small.json"), "w" + ) as f: + f.write(assets_out.model_dump_json(indent=4)) + + # Synthetic portfolio; industrial activity at different locations + assets = [ + IndustrialActivity(lat, lon, type="Construction", location=continent) + for lon, lat, continent in zip(longitudes, latitudes, continents) + ] + assets = [assets[i] for i in [0, 100, 200, 300, 400, 500, 600, 700, 800, 900]] + detailed_results = calculate_impacts( + assets, + hazard_model=hazard_model, + vulnerability_models=vulnerability_models, + scenario="ssp585", + year=2030, + ) + keys = list(detailed_results.keys()) + means = np.array( + [ + detailed_results[key][0].impact.mean_impact() + for key in detailed_results.keys() ] - filtered = filtered[-60 < filtered["latitude"]] + ) + interesting = [k for (k, m) in zip(keys, means) if m > 0] + assets_out = api_assets(item[0] for item in interesting[0:10]) + with open( + os.path.join(test_dir, 
"assets_example_industrial_activity_small.json"), "w" + ) as f: + f.write(assets_out.model_dump_json(indent=4)) + + # Synthetic portfolio; real estate assets at different locations + assets = [ + RealEstateAsset(lat, lon, location=continent, type="Buildings/Industrial") + for lon, lat, continent in zip(longitudes, latitudes, continents) + if isinstance(continent, str) and continent != "Oceania" + ] + detailed_results = calculate_impacts( + assets, + hazard_model=hazard_model, + vulnerability_models=vulnerability_models, + scenario="ssp585", + year=2030, + ) + keys = list(detailed_results.keys()) + means = np.array( + [ + detailed_results[key][0].impact.mean_impact() + if detailed_results[key] + else None + for key in detailed_results.keys() + ] + ) + interesting = [k for (k, m) in zip(keys, means) if (m and m > 0)] + assets_out = api_assets(item[0] for item in interesting[0:10]) + with open( + os.path.join(test_dir, "assets_example_real_estate_small.json"), "w" + ) as f: + f.write(assets_out.model_dump_json(indent=4)) + assert len(os.listdir(test_dir)) == 3 + - longitudes = np.array(filtered["longitude"]) - latitudes = np.array(filtered["latitude"]) +def test_thermal_power_generation_portfolio( + wri_power_plant_assets, test_dir, load_credentials +): + asset_list = wri_power_plant_assets - primary_fuels = np.array( - [ - primary_fuel.replace(" and ", "And").replace(" ", "") - for primary_fuel in filtered["primary_fuel"] - ] + filtered = asset_list.loc[ + asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"]) + ] + filtered = filtered[-60 < filtered["latitude"]] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + + primary_fuels = np.array( + [ + primary_fuel.replace(" and ", "And").replace(" ", "") + for primary_fuel in filtered["primary_fuel"] + ] + ) + + # Capacity describes a maximum electric power rate. + # Generation describes the actual electricity output of the plant over a period of time. 
+ capacities = np.array(filtered["capacity_mw"]) + + _, continents = wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + # Power generating assets that are of interest + assets = [ + ThermalPowerGeneratingAsset( + latitude, + longitude, + type=primary_fuel, + location=continent, + capacity=capacity, + ) + for latitude, longitude, capacity, primary_fuel, continent in zip( + latitudes, + longitudes, + capacities, + primary_fuels, + continents, ) + ] + + scenario = "ssp585" + year = 2030 - # Capacity describes a maximum electric power rate. - # Generation describes the actual electricity output of the plant over a period of time. - capacities = np.array(filtered["capacity_mw"]) + hazard_model = calculation.get_default_hazard_model() + vulnerability_models = DictBasedVulnerabilityModels( + calculation.get_default_vulnerability_models() + ) - _, continents = wd.get_countries_and_continents( - latitudes=latitudes, longitudes=longitudes + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) + out = [ + { + "asset": type(result.asset).__name__, + "type": getattr(result.asset, "type", None), + "capacity": getattr(result.asset, "capacity", None), + "location": getattr(result.asset, "location", None), + "latitude": result.asset.latitude, + "longitude": result.asset.longitude, + "impact_mean": ( + None + if isinstance(results[key][0].impact, EmptyImpactDistrib) + else results[key][0].impact.mean_impact() + ), + "hazard_type": key.hazard_type.__name__, + } + for result, key in zip(results, results.keys()) + ] + pd.DataFrame.from_dict(out).to_csv( + os.path.join( + test_dir, f"thermal_power_generation_example_{scenario}_{year}.csv" ) + ) + assert len(out) == 53052 + assert len(os.listdir(test_dir)) == 1 + + +def test_thermal_power_generation_impacts( + setup_data, hazard_indicator_dict, vulnerability_models_dict, load_credentials +): + assets = setup_data + hazard_model = 
calculation.get_default_hazard_model() + out = [] + empty_impact_count = 0 + asset_subtype_none_count = 0 + empty_impact_scenarios = [] + asset_subtype_none_assets = [] + exception_scenarios = [] + for scenario_year, vulnerability_models in vulnerability_models_dict.items(): + scenario, year = scenario_year.split("_") - # Power generating assets that are of interest - assets = [ - ThermalPowerGeneratingAsset( - latitude, - longitude, - type=primary_fuel, - location=continent, - capacity=capacity, + vulnerability_models = DictBasedVulnerabilityModels( + {ThermalPowerGeneratingAsset: vulnerability_models} + ) + + try: + results = calculate_impacts( + assets, + hazard_model, + vulnerability_models, + scenario=scenario, + year=int(year), ) - for latitude, longitude, capacity, primary_fuel, continent in zip( - latitudes, - longitudes, - capacities, - primary_fuels, - continents, + except Exception as e: + exception_scenarios.append((scenario, year, str(e))) + continue + + for result, key in zip(results, results.keys()): + impact = results[key][0].impact + if isinstance(impact, EmptyImpactDistrib): + impact_mean = None + hazard_type = None + impact_distr_bin_edges = "0;0" + impact_distr_p = "0;0" + empty_impact_count += 1 + empty_impact_scenarios.append((scenario, year, result.asset.asset_name)) + else: + impact_mean = impact.mean_impact() + impact_distr_bin_edges = ";".join(impact.impact_bins.astype(str)) + impact_distr_p = ";".join(impact.prob.astype(str)) + hazard_type = ( + impact.hazard_type.__name__ + if impact.hazard_type.__name__ != "type" + else "Wind" + ) + + indicator_id = ( + hazard_indicator_dict.get(hazard_type) if hazard_type else None ) - ] + asset_subtype = result.asset.type if hasattr(result.asset, "type") else None + if asset_subtype is None: + asset_subtype_none_count += 1 + asset_subtype_none_assets.append(result.asset.asset_name) + + out.append( + { + "asset_name": result.asset.asset_name, + "latitude": result.asset.latitude, + "longitude": 
result.asset.longitude, + "hazard_type": hazard_type, + "indicator_id": indicator_id, + "scenario": scenario, + "year": int(year), + "impact_mean": impact_mean, + "impact_distr_bin_edges": impact_distr_bin_edges, + "impact_distr_p": impact_distr_p, + } + ) + + # Out can be used when dealing with expected values. + + # Assert the counts and details for empty impacts, None asset_subtype, and exceptions + + assert ( + empty_impact_count == 0 + ), f"Found {empty_impact_count} EmptyImpactDistrib instances in scenarios: {empty_impact_scenarios}" + assert ( + asset_subtype_none_count == 0 + ), f"Found {asset_subtype_none_count} assets with None asset_subtype: {asset_subtype_none_assets}" + assert ( + not exception_scenarios + ), f"Exceptions occurred in scenarios: {exception_scenarios}" - scenario = "ssp585" - year = 2030 - hazard_model = calculation.get_default_hazard_model() +def test_thermal_power_generation_impacts_water_stress( + setup_data, + hazard_indicator_dict, + vulnerability_models_dict_water_stress, + load_credentials, +): + assets = setup_data + hazard_model = calculation.get_default_hazard_model() + out = [] + empty_impact_count = 0 + asset_subtype_none_count = 0 + empty_impact_scenarios = [] + asset_subtype_none_assets = [] + exception_scenarios = [] + for ( + scenario_year, + vulnerability_models, + ) in vulnerability_models_dict_water_stress.items(): + scenario, year = scenario_year.split("_") + vulnerability_models = DictBasedVulnerabilityModels( - calculation.get_default_vulnerability_models() + {ThermalPowerGeneratingAsset: vulnerability_models} ) - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) - out = [ - { - "asset": type(result.asset).__name__, - "type": getattr(result.asset, "type", None), - "capacity": getattr(result.asset, "capacity", None), - "location": getattr(result.asset, "location", None), - "latitude": result.asset.latitude, - "longitude": result.asset.longitude, - 
"impact_mean": ( - None - if isinstance(results[key][0].impact, EmptyImpactDistrib) - else results[key].impact.mean_impact() - ), - "hazard_type": key.hazard_type.__name__, - } - for result, key in zip(results, results.keys()) - ] - pd.DataFrame.from_dict(out).to_csv( - os.path.join( - cache_folder, - "thermal_power_generation_example_" - + scenario - + "_" - + str(year) - + ".csv", + try: + results = calculate_impacts( + assets, + hazard_model, + vulnerability_models, + scenario=scenario, + year=int(year), + ) + except Exception as e: + exception_scenarios.append((scenario, year, str(e))) + continue + + for result, key in zip(results, results.keys()): + impact = results[key][0].impact + if isinstance(impact, EmptyImpactDistrib): + impact_mean = None + hazard_type = None + impact_distr_bin_edges = "0;0" + impact_distr_p = "0;0" + empty_impact_count += 1 + empty_impact_scenarios.append((scenario, year, result.asset.asset_name)) + else: + impact_mean = impact.mean_impact() + impact_distr_bin_edges = ";".join(impact.impact_bins.astype(str)) + impact_distr_p = ";".join(impact.prob.astype(str)) + hazard_type = ( + impact.hazard_type.__name__ + if impact.hazard_type.__name__ != "type" + else "Wind" + ) + + indicator_id = ( + hazard_indicator_dict.get(hazard_type) if hazard_type else None ) + asset_subtype = result.asset.type if hasattr(result.asset, "type") else None + if asset_subtype is None: + asset_subtype_none_count += 1 + asset_subtype_none_assets.append(result.asset.asset_name) + + out.append( + { + "asset_name": result.asset.asset_name, + "latitude": result.asset.latitude, + "longitude": result.asset.longitude, + "hazard_type": hazard_type, + "indicator_id": indicator_id, + "scenario": scenario, + "year": int(year), + "impact_mean": impact_mean, + "impact_distr_bin_edges": impact_distr_bin_edges, + "impact_distr_p": impact_distr_p, + } + ) + + # Out can be used when dealing with expected values. 
+ + # Assert the counts and details for empty impacts, None asset_subtype, and exceptions + + # There are 57 assets that are EmptyImpactDistrib objects, all for ThermalPowerGenerationWaterStressModel, which is used 10 times + assert ( + empty_impact_count == 570 + ), f"Found {empty_impact_count} EmptyImpactDistrib instances in scenarios: {empty_impact_scenarios}" + assert ( + asset_subtype_none_count == 0 + ), f"Found {asset_subtype_none_count} assets with None asset_subtype: {asset_subtype_none_assets}" + assert ( + not exception_scenarios + ), f"Exceptions occurred in scenarios: {exception_scenarios}" + + +def test_thermal_power_generation_impacts_extra( + load_credentials, setup_assets_extra, vul_models_dict_extra +): + """Calculate impacts for the vulnerability models from UseCaseId.STRESSTEST""" + assets = setup_assets_extra + + out = [] + empty_impact_count = 0 + asset_subtype_none_count = 0 + empty_impact_scenarios = [] + asset_subtype_none_assets = [] + exception_scenarios = [] + + for scenario_year, vulnerability_models in vul_models_dict_extra.items(): + scenario, year = scenario_year.split("_") + + print(scenario_year) + + if vulnerability_models is ThermalPowerGenerationAqueductWaterRiskModel(): + reader = ZarrReader() + else: + devaccess = { + "OSC_S3_ACCESS_KEY": os.environ.get("OSC_S3_ACCESS_KEY_DEV", None), + "OSC_S3_SECRET_KEY": os.environ.get("OSC_S3_SECRET_KEY_DEV", None), + "OSC_S3_BUCKET": os.environ.get("OSC_S3_BUCKET_DEV", None), + } + get_env = devaccess.get + reader = ZarrReader(get_env=get_env) + + vulnerability_models = DictBasedVulnerabilityModels( + {ThermalPowerGeneratingAsset: vulnerability_models} ) - self.assertAlmostEqual(1, 1) - - def api_assets(self, assets: List[Asset]): - items = [ - physrisk.api.v1.common.Asset( - asset_class=type(a).__name__, - type=getattr(a, "type", None), - location=getattr(a, "location", None), - latitude=a.latitude, - longitude=a.longitude, + + hazard_model = ZarrHazardModel( + 
source_paths=get_default_source_paths(), reader=reader + ) + + try: + results = calculate_impacts( + assets, + hazard_model, + vulnerability_models, + scenario=scenario, + year=int(year), ) - for a in assets - ] - return physrisk.api.v1.common.Assets(items=items) + except Exception as e: + exception_scenarios.append((scenario, year, str(e))) + continue + + for result, key in zip(results, results.keys()): + impact = results[key][0].impact + if isinstance(impact, EmptyImpactDistrib): + impact_mean = None + hazard_type = None + empty_impact_count += 1 + empty_impact_scenarios.append((scenario, year, result.asset.location)) + else: + impact_mean = impact.mean_impact() + hazard_type = ( + impact.hazard_type.__name__ + if impact.hazard_type.__name__ != "type" + else "Wind" + ) + + asset_subtype = result.asset.type if hasattr(result.asset, "type") else None + if asset_subtype is None: + asset_subtype_none_count += 1 + asset_subtype_none_assets.append(result.asset.location) + + out.append( + { + "asset": type(result.asset).__name__, + "type": getattr(result.asset, "type", None), + "location": getattr(result.asset, "location", None), + "latitude": result.asset.latitude, + "longitude": result.asset.longitude, + "impact_mean": impact_mean, + "hazard_type": hazard_type if hazard_type else "Wind", + "scenario": scenario, + "year": int(year), + } + ) + + # out can be used when dealing with expected values. 
+ + # Assert the counts and details for empty impacts, None asset_subtype, and exceptions + assert ( + empty_impact_count == 0 + ), f"Found {empty_impact_count} EmptyImpactDistrib instances in scenarios: {empty_impact_scenarios}" + assert ( + asset_subtype_none_count == 0 + ), f"Found {asset_subtype_none_count} assets with None asset_subtype: {asset_subtype_none_assets}" + assert ( + not exception_scenarios + ), f"Exceptions occurred in scenarios: {exception_scenarios}" diff --git a/tests/models/real_estate_models_stress_test_test.py b/tests/models/real_estate_models_stress_test_test.py new file mode 100644 index 00000000..c65ea3d4 --- /dev/null +++ b/tests/models/real_estate_models_stress_test_test.py @@ -0,0 +1,304 @@ +"""Test asset impact calculations using pytest.""" + +import bz2 +import json + +import numpy as np +import pandas as pd +import pytest + +import physrisk.data.static.world as wd +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports +from physrisk.kernel.assets import RealEstateAsset +from physrisk.kernel.impact import calculate_impacts +from physrisk.kernel.impact_distrib import EmptyImpactDistrib +from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels +from physrisk.vulnerability_models.real_estate_models import ( + CoolingModel, + GenericTropicalCycloneModel, + RealEstateCoastalInundationModel, + RealEstateRiverineInundationModel, +) + + +@pytest.fixture +def load_assets(): + with bz2.open("./tests/api/housing_kaggle_spain.json.bz2") as f: + houses = json.load(f) + + asset_df = pd.DataFrame(houses["items"]) + longitudes = asset_df.longitude + latitudes = asset_df.latitude + types = asset_df.type + asset_names = asset_df.address + asset_prices = asset_df.price + + countries, continents = 
wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + assets = [ + RealEstateAsset(latitude, longitude, type=type_, location="Europe") + for latitude, longitude, type_ in zip(latitudes, longitudes, types) + ] + + for i, asset_name in enumerate(asset_names): + assets[i].__dict__.update({"asset_name": asset_name}) + + for i, asset_price in enumerate(asset_prices): + assets[i].__dict__.update({"asset_price": asset_price}) + + entrada = np.random.choice( + a=[10, 20, 30], size=len(asset_prices), p=[0.1, 0.2, 0.7] + ) + loan_amounts = (1 - entrada / 100) * asset_prices.to_numpy().astype(float) + + for i, loan_amount in enumerate(loan_amounts): + assets[i].__dict__.update({"asset_loan_amount": loan_amount}) + + return assets + + +@pytest.fixture +def hazard_indicator_dict(): + return { + "Wind": "max_speed", + "CoastalInundation": "flood_depth", + "RiverineInundation": "flood_depth", + "ChronicHeat": '"mean_degree_days_above', + } + + +@pytest.fixture +def vulnerability_models_dict(): + return { + "ssp119_2050": [GenericTropicalCycloneModel()], + "ssp126_2030": [CoolingModel()], + "ssp126_2040": [CoolingModel()], + "ssp126_2050": [CoolingModel()], + "ssp245_2030": [CoolingModel()], + "ssp245_2040": [CoolingModel()], + "ssp245_2050": [CoolingModel(), GenericTropicalCycloneModel()], + "ssp585_2030": [CoolingModel()], + "ssp585_2040": [CoolingModel()], + "ssp585_2050": [CoolingModel(), GenericTropicalCycloneModel()], + } + + +@pytest.fixture +def rcp_vulnerability_models_dict(): + return { + "rcp4p5_2030": [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], + "rcp4p5_2050": [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], + "rcp4p5_2080": [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], + "rcp8p5_2030": [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], + "rcp8p5_2050": [ + RealEstateCoastalInundationModel(), 
+ RealEstateRiverineInundationModel(), + ], + "rcp8p5_2080": [ + RealEstateCoastalInundationModel(), + RealEstateRiverineInundationModel(), + ], + } + + +def test_calculate_impacts( + load_assets, + hazard_indicator_dict, + vulnerability_models_dict, + rcp_vulnerability_models_dict, + load_credentials, +): + assets = load_assets + + reader = ZarrReader() + + out = [] + empty_impact_count = 0 + asset_subtype_none_count = 0 + empty_impact_scenarios = [] + asset_subtype_none_assets = [] + exception_scenarios = [] + + for scenario_year, vulnerability_models in vulnerability_models_dict.items(): + scenario, year = scenario_year.split("_") + + vulnerability_models = DictBasedVulnerabilityModels( + {RealEstateAsset: vulnerability_models} + ) + + hazard_model = ZarrHazardModel( + source_paths=get_default_source_paths(), reader=reader + ) + + try: + results = calculate_impacts( + assets, + hazard_model, + vulnerability_models, + scenario=scenario, + year=int(year), + ) + except Exception as e: + exception_scenarios.append((scenario, year, str(e))) + continue + + for result, key in zip(results, results.keys()): + impact = results[key][0].impact + if isinstance(impact, EmptyImpactDistrib): + impact_mean = None + hazard_type = None + impact_distr_bin_edges = "0;0" + impact_distr_p = "0;0" + empty_impact_count += 1 + empty_impact_scenarios.append((scenario, year, result.asset.asset_name)) + else: + impact_mean = impact.mean_impact() + impact_distr_bin_edges = ";".join(impact.impact_bins.astype(str)) + impact_distr_p = ";".join(impact.prob.astype(str)) + hazard_type = ( + impact.hazard_type.__name__ + if impact.hazard_type.__name__ != "type" + else "Wind" + ) + + if hazard_type is None: + indicator_id = None + else: + indicator_id = hazard_indicator_dict.get(hazard_type) + + asset_subtype = result.asset.type if hasattr(result.asset, "type") else None + if asset_subtype is None: + asset_subtype_none_count += 1 + asset_subtype_none_assets.append(result.asset.asset_name) + + 
out.append( + { + "asset_name": result.asset.asset_name, + "asset_price": result.asset.asset_price, + "asset_loan_amount": result.asset.asset_loan_amount, + "asset_subtype": asset_subtype, + "latitude": result.asset.latitude, + "longitude": result.asset.longitude, + "hazard_type": hazard_type, + "indicator_id": indicator_id, + "display_name": "display_name_vacio", + "model": "model_vacio", + "scenario": scenario, + "year": int(year), + "return_periods": {"0": "0"}, + "parameter": 0, + "impact_mean": impact_mean, + "impact_distr_bin_edges": impact_distr_bin_edges, + "impact_distr_p": impact_distr_p, + "impact_exc_exceed_p": "0;0", + "impact_exc_values": "0;0", + "vuln_model_designer": "OS-C-RealEstate-LTV", + } + ) + + for scenario_year, vulnerability_models in rcp_vulnerability_models_dict.items(): + scenario, year = scenario_year.split("_") + + vulnerability_models = DictBasedVulnerabilityModels( + {RealEstateAsset: vulnerability_models} + ) + + hazard_model = ZarrHazardModel( + source_paths=get_default_source_paths(), reader=reader + ) + + try: + results = calculate_impacts( + assets, + hazard_model, + vulnerability_models, + scenario=scenario, + year=int(year), + ) + except Exception as e: + exception_scenarios.append((scenario, year, str(e))) + continue + + for result, key in zip(results, results.keys()): + impact = results[key][0].impact + if isinstance(impact, EmptyImpactDistrib): + impact_mean = None + hazard_type = None + impact_distr_bin_edges = "0;0" + impact_distr_p = "0;0" + empty_impact_count += 1 + empty_impact_scenarios.append((scenario, year, result.asset.asset_name)) + else: + impact_mean = impact.mean_impact() + impact_distr_bin_edges = ";".join(impact.impact_bins.astype(str)) + impact_distr_p = ";".join(impact.prob.astype(str)) + hazard_type = ( + impact.hazard_type.__name__ + if impact.hazard_type.__name__ != "type" + else "Wind" + ) + + if hazard_type is None: + indicator_id = None + else: + indicator_id = hazard_indicator_dict.get(hazard_type) 
+ + asset_subtype = result.asset.type if hasattr(result.asset, "type") else None + if asset_subtype is None: + asset_subtype_none_count += 1 + asset_subtype_none_assets.append(result.asset.asset_name) + + out.append( + { + "asset_name": result.asset.asset_name, + "asset_price": result.asset.asset_price, + "asset_loan_amount": result.asset.asset_loan_amount, + "asset_subtype": asset_subtype, + "latitude": result.asset.latitude, + "longitude": result.asset.longitude, + "hazard_type": hazard_type, + "indicator_id": indicator_id, + "display_name": "display_name_vacio", + "model": "model_vacio", + "scenario": scenario, + "year": int(year), + "return_periods": {"0": "0"}, + "parameter": 0, + "impact_mean": impact_mean, + "impact_distr_bin_edges": impact_distr_bin_edges, + "impact_distr_p": impact_distr_p, + "impact_exc_exceed_p": "0;0", + "impact_exc_values": "0;0", + "vuln_model_designer": "OS-C-RealEstate-LTV", + } + ) + + # out can be used when dealing with expected values. + + # Assert the counts and details for empty impacts, None asset_subtype, and exceptions + assert ( + empty_impact_count == 0 + ), f"Found {empty_impact_count} EmptyImpactDistrib instances in scenarios: {empty_impact_scenarios}" + assert ( + asset_subtype_none_count == 0 + ), f"Found {asset_subtype_none_count} assets with None asset_subtype: {asset_subtype_none_assets}" + assert ( + not exception_scenarios + ), f"Exceptions occurred in scenarios: {exception_scenarios}" diff --git a/tests/models/real_estate_models_test.py b/tests/models/real_estate_models_test.py index 1d1df679..df1a73b9 100644 --- a/tests/models/real_estate_models_test.py +++ b/tests/models/real_estate_models_test.py @@ -1,290 +1,266 @@ -"""Test asset impact calculations.""" - -import unittest - import numpy as np from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.hazard_models.core_hazards import get_default_source_paths from physrisk.kernel.assets import RealEstateAsset from physrisk.kernel.hazards 
import CoastalInundation, RiverineInundation +from physrisk.kernel import calculation  # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import ImpactKey, calculate_impacts from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels from physrisk.vulnerability_models.real_estate_models import ( RealEstateCoastalInundationModel, RealEstateRiverineInundationModel, ) - from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_inundation -class TestRealEstateModels(unittest.TestCase): - """Tests RealEstateInundationModel.""" - - def test_real_estate_model_details(self): - curve = np.array( - [0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) - - # location="Europe", type="Buildings/Residential" - assets = [ - RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") - for lon, lat in zip(TestData.longitudes[0:1], TestData.latitudes[0:1]) - ] - - scenario = "rcp8p5" - year = 2080 - - vulnerability_models = DictBasedVulnerabilityModels( - {RealEstateAsset: [RealEstateRiverineInundationModel()]} - ) - - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) - - hazard_bin_edges = results[ - ImpactKey(assets[0], RiverineInundation, scenario, year) - ][0].event.intensity_bin_edges - hazard_bin_probs = results[ - ImpactKey(assets[0], RiverineInundation, scenario, year) - ][0].event.prob - - # check one: - # the probability of inundation greater than 0.505m in a year is 1/10.0 - # the probability of inundation greater than 0.333m in a year is 1/5.0 - # therefore the probability of an inundation between 0.333 and 0.505 in a year is 1/5.0 - 1/10.0 -
np.testing.assert_almost_equal(hazard_bin_edges[1:3], np.array([0.333, 0.505])) - np.testing.assert_almost_equal(hazard_bin_probs[1], 0.1) - - # check that intensity bin edges for vulnerability matrix are same as for hazard - vulnerability_intensity_bin_edges = results[ - ImpactKey(assets[0], RiverineInundation, scenario, year) - ][0].vulnerability.intensity_bins - np.testing.assert_almost_equal( - vulnerability_intensity_bin_edges, hazard_bin_edges - ) +def test_real_estate_model_details(): + curve = np.array([0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163]) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + + assets = [ + RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") + for lon, lat in zip(TestData.longitudes[0:1], TestData.latitudes[0:1]) + ] + + scenario = "rcp8p5" + year = 2080 + + vulnerability_models = DictBasedVulnerabilityModels( + {RealEstateAsset: [RealEstateRiverineInundationModel()]} + ) + + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) + + hazard_bin_edges = results[ + ImpactKey(assets[0], RiverineInundation, scenario, year) + ][0].event.intensity_bin_edges + hazard_bin_probs = results[ + ImpactKey(assets[0], RiverineInundation, scenario, year) + ][0].event.prob + + # check one: + # the probability of inundation greater than 0.505m in a year is 1/10.0 + # the probability of inundation greater than 0.333m in a year is 1/5.0 + # therefore the probability of an inundation between 0.333 and 0.505 in a year is 1/5.0 - 1/10.0 + np.testing.assert_almost_equal(hazard_bin_edges[1:3], np.array([0.333, 0.505])) + np.testing.assert_almost_equal(hazard_bin_probs[1], 0.1) + + # check that intensity bin edges for vulnerability matrix are same as for hazard + vulnerability_intensity_bin_edges = results[ + ImpactKey(assets[0], 
RiverineInundation, scenario, year) + ][0].vulnerability.intensity_bins + np.testing.assert_almost_equal(vulnerability_intensity_bin_edges, hazard_bin_edges) + + # check the impact distribution the matrix is size [len(intensity_bins) - 1, len(impact_bins) - 1] + + cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + 0 + ].vulnerability.prob_matrix[1, :] + # check conditional prob for inundation intensity 0.333..0.505 + mean, std = np.mean(cond_probs), np.std(cond_probs) + np.testing.assert_almost_equal(cond_probs.sum(), 1) + np.testing.assert_allclose([mean, std], [0.09090909, 0.08184968], rtol=1e-6) + + # probability that impact occurs between impact bin edge 1 and impact bin edge 2 + prob_impact = np.dot( + hazard_bin_probs, + results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + 0 + ].vulnerability.prob_matrix[:, 1], + ) + np.testing.assert_almost_equal(prob_impact, 0.19350789547968042) - # check the impact distribution the matrix is size [len(intensity_bins) - 1, len(impact_bins) - 1] - cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + # no check with pre-calculated values for others: + np.testing.assert_allclose( + results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ 0 - ].vulnerability.prob_matrix[1, :] - # check conditional prob for inundation intensity 0.333..0.505 - mean, std = np.mean(cond_probs), np.std(cond_probs) - np.testing.assert_almost_equal(cond_probs.sum(), 1) - np.testing.assert_allclose([mean, std], [0.09090909, 0.08184968], rtol=1e-6) - - # probability that impact occurs between impact bin edge 1 and impact bin edge 2 - prob_impact = np.dot( - hazard_bin_probs, - results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ - 0 - ].vulnerability.prob_matrix[:, 1], - ) - np.testing.assert_almost_equal(prob_impact, 0.19350789547968042) - - # no check with pre-calculated values for others: - np.testing.assert_allclose( - results[ImpactKey(assets[0], 
RiverineInundation, scenario, year)][ - 0 - ].impact.prob, - np.array( - [ - 0.02815762, - 0.1935079, - 0.11701139, - 0.06043065, - 0.03347816, - 0.02111368, - 0.01504522, - 0.01139892, - 0.00864469, - 0.00626535, - 0.00394643, - ] - ), - rtol=2e-6, - ) + ].impact.prob, + np.array( + [ + 0.02815762, + 0.1935079, + 0.11701139, + 0.06043065, + 0.03347816, + 0.02111368, + 0.01504522, + 0.01139892, + 0.00864469, + 0.00626535, + 0.00394643, + ] + ), + rtol=2e-6, + ) - def test_coastal_real_estate_model(self): - curve = np.array([0.223, 0.267, 0.29, 0.332, 0.359, 0.386, 0.422, 0.449, 0.476]) - store = mock_hazard_model_store_inundation( - TestData.coastal_longitudes, TestData.coastal_latitudes, curve - ) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) +def test_coastal_real_estate_model(): + curve = np.array([0.223, 0.267, 0.29, 0.332, 0.359, 0.386, 0.422, 0.449, 0.476]) - # location="Europe", type="Buildings/Residential" - assets = [ - RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") - for lon, lat in zip( - TestData.coastal_longitudes[0:1], TestData.coastal_latitudes[0:1] - ) - ] - - scenario = "rcp8p5" - year = 2080 + store = mock_hazard_model_store_inundation( + TestData.coastal_longitudes, TestData.coastal_latitudes, curve + ) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) - vulnerability_models = DictBasedVulnerabilityModels( - {RealEstateAsset: [RealEstateCoastalInundationModel()]} + assets = [ + RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") + for lon, lat in zip( + TestData.coastal_longitudes[0:1], TestData.coastal_latitudes[0:1] ) + ] - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) + scenario = "rcp8p5" + year = 2080 - np.testing.assert_allclose( - results[ImpactKey(assets[0], CoastalInundation, scenario, year)][ - 0 - ].impact.prob, - np.array( - [ - 2.78081230e-02, - 
1.96296619e-01, - 1.32234770e-01, - 7.36581177e-02, - 3.83434609e-02, - 1.83916914e-02, - 7.97401009e-03, - 3.04271878e-03, - 9.79400125e-04, - 2.41250436e-04, - 2.98387241e-05, - ] - ), - rtol=2e-6, - ) + vulnerability_models = DictBasedVulnerabilityModels( + {RealEstateAsset: [RealEstateCoastalInundationModel()]} + ) + + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) - def test_commercial_real_estate_model_details(self): - curve = np.array( + np.testing.assert_allclose( + results[ImpactKey(assets[0], CoastalInundation, scenario, year)][0].impact.prob, + np.array( [ - 2.8302893e-06, - 0.09990284, - 0.21215445, - 0.531271, - 0.7655724, - 0.99438345, - 1.2871761, - 1.502281, - 1.7134278, + 2.78081230e-02, + 1.96296619e-01, + 1.32234770e-01, + 7.36581177e-02, + 3.83434609e-02, + 1.83916914e-02, + 7.97401009e-03, + 3.04271878e-03, + 9.79400125e-04, + 2.41250436e-04, + 2.98387241e-05, ] - ) - store = mock_hazard_model_store_inundation( - TestData.longitudes, TestData.latitudes, curve - ) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store - ) - - # location="South America", type="Buildings/Commercial" - assets = [ - RealEstateAsset( - lat, lon, location="South America", type="Buildings/Commercial" - ) - for lon, lat in zip(TestData.longitudes[-4:-3], TestData.latitudes[-4:-3]) + ), + rtol=2e-6, + ) + + +def test_commercial_real_estate_model_details(): + curve = np.array( + [ + 2.8302893e-06, + 0.09990284, + 0.21215445, + 0.531271, + 0.7655724, + 0.99438345, + 1.2871761, + 1.502281, + 1.7134278, ] - - scenario = "rcp8p5" - year = 2080 - - # impact bin edges are calibrated so that hazard_bin_probs == impact_bin_probs - # when the impact standard deviation is negligible: - vulnerability_models = DictBasedVulnerabilityModels( - { - RealEstateAsset: [ - RealEstateRiverineInundationModel( - impact_bin_edges=np.array( - [ - 0, - 0.030545039098059, - 0.125953058445539, - 
0.322702019487674, - 0.566880882840096, - 0.731980974578735, - 0.823993215529066, - 0.884544511664047, - 0.922115133960502, - 0.969169745946688, - 1.0, - ] - ) + ) + store = mock_hazard_model_store_inundation( + TestData.longitudes, TestData.latitudes, curve + ) + hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store) + + assets = [ + RealEstateAsset(lat, lon, location="South America", type="Buildings/Commercial") + for lon, lat in zip(TestData.longitudes[-4:-3], TestData.latitudes[-4:-3]) + ] + + scenario = "rcp8p5" + year = 2080 + + # impact bin edges are calibrated so that hazard_bin_probs == impact_bin_probs + # when the impact standard deviation is negligible: + vulnerability_models = DictBasedVulnerabilityModels( + { + RealEstateAsset: [ + RealEstateRiverineInundationModel( + impact_bin_edges=np.array( + [ + 0, + 0.030545039098059, + 0.125953058445539, + 0.322702019487674, + 0.566880882840096, + 0.731980974578735, + 0.823993215529066, + 0.884544511664047, + 0.922115133960502, + 0.969169745946688, + 1.0, + ] ) - ] - } - ) - - results = calculate_impacts( - assets, hazard_model, vulnerability_models, scenario=scenario, year=year - ) - - hazard_bin_edges = results[ - ImpactKey(assets[0], RiverineInundation, scenario, year) - ][0].event.intensity_bin_edges - hazard_bin_probs = results[ - ImpactKey(assets[0], RiverineInundation, scenario, year) - ][0].event.prob - - # check one: - # the probability of inundation greater than 0.531271m in a year is 1/25 - # the probability of inundation greater than 0.21215445m in a year is 1/10 - # therefore the probability of an inundation between 0.21215445 and 0.531271 in a year is 1/10 - 1/25 - np.testing.assert_almost_equal( - hazard_bin_edges[2:4], np.array([0.21215445, 0.531271]) - ) - np.testing.assert_almost_equal(hazard_bin_probs[2], 0.06) - - # check that intensity bin edges for vulnerability matrix are same as for hazard - vulnerability_intensity_bin_edges = results[ - ImpactKey(assets[0], 
RiverineInundation, scenario, year) - ][0].vulnerability.intensity_bins - np.testing.assert_almost_equal( - vulnerability_intensity_bin_edges, hazard_bin_edges - ) + ) + ] + } + ) + + results = calculate_impacts( + assets, hazard_model, vulnerability_models, scenario=scenario, year=year + ) + + hazard_bin_edges = results[ + ImpactKey(assets[0], RiverineInundation, scenario, year) + ][0].event.intensity_bin_edges + hazard_bin_probs = results[ + ImpactKey(assets[0], RiverineInundation, scenario, year) + ][0].event.prob + + # check one: + # the probability of inundation greater than 0.531271m in a year is 1/25 + # the probability of inundation greater than 0.21215445m in a year is 1/10 + # therefore the probability of an inundation between 0.21215445 and 0.531271 in a year is 1/10 - 1/25 + np.testing.assert_almost_equal( + hazard_bin_edges[2:4], np.array([0.21215445, 0.531271]) + ) + np.testing.assert_almost_equal(hazard_bin_probs[2], 0.06) + + # check that intensity bin edges for vulnerability matrix are same as for hazard + vulnerability_intensity_bin_edges = results[ + ImpactKey(assets[0], RiverineInundation, scenario, year) + ][0].vulnerability.intensity_bins + np.testing.assert_almost_equal(vulnerability_intensity_bin_edges, hazard_bin_edges) + + # check the impact distribution the matrix is size [len(intensity_bins) - 1, len(impact_bins) - 1] + cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + 0 + ].vulnerability.prob_matrix[2, :] + # check conditional prob for inundation intensity at 0.371712725m + mean, std = np.mean(cond_probs), np.std(cond_probs) + np.testing.assert_almost_equal(cond_probs.sum(), 1) + np.testing.assert_allclose([mean, std], [0.1, 0.2884275164878624], rtol=1e-6) + + # probability that impact occurs between impact bin edge 2 and impact bin edge 3 + prob_impact = np.dot( + hazard_bin_probs, + results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + 0 + ].vulnerability.prob_matrix[:, 2], + ) + 
np.testing.assert_almost_equal(prob_impact, 0.10040196672295522) - # check the impact distribution the matrix is size [len(intensity_bins) - 1, len(impact_bins) - 1] - cond_probs = results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ + np.testing.assert_allclose( + results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ 0 - ].vulnerability.prob_matrix[2, :] - # check conditional prob for inundation intensity at 0.371712725m - mean, std = np.mean(cond_probs), np.std(cond_probs) - np.testing.assert_almost_equal(cond_probs.sum(), 1) - np.testing.assert_allclose([mean, std], [0.1, 0.2884275164878624], rtol=1e-6) - - # probability that impact occurs between impact bin edge 2 and impact bin edge 3 - prob_impact = np.dot( - hazard_bin_probs, - results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ - 0 - ].vulnerability.prob_matrix[:, 2], - ) - np.testing.assert_almost_equal(prob_impact, 0.10040196672295522) - - # no check with pre-calculated values for others: - np.testing.assert_allclose( - results[ImpactKey(assets[0], RiverineInundation, scenario, year)][ - 0 - ].impact.prob, - np.array( - [ - 2.009085e-07, - 3.001528e-01, - 1.004020e-01, - 5.885136e-02, - 1.760415e-02, - 1.159864e-02, - 6.130639e-03, - 2.729225e-03, - 1.446537e-03, - 8.450993e-05, - ] - ), - rtol=2e-6, - ) + ].impact.prob, + np.array( + [ + 2.009085e-07, + 3.001528e-01, + 1.004020e-01, + 5.885136e-02, + 1.760415e-02, + 1.159864e-02, + 6.130639e-03, + 2.729225e-03, + 1.446537e-03, + 8.450993e-05, + ] + ), + rtol=2e-6, + ) diff --git a/tests/models/wbgt_model_test.py b/tests/models/wbgt_model_test.py index ea4f5195..e2b46344 100644 --- a/tests/models/wbgt_model_test.py +++ b/tests/models/wbgt_model_test.py @@ -1,7 +1,7 @@ -import unittest from typing import Iterable, List, Union, cast import numpy as np +import pytest from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.hazard_models.core_hazards import get_default_source_paths @@ -12,6 
+12,7 @@ HazardParameterDataResponse, ) from physrisk.kernel.hazards import ChronicHeat +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import calculate_impacts from physrisk.kernel.impact_distrib import ImpactDistrib, ImpactType from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels @@ -19,7 +20,6 @@ ChronicHeatGZNModel, get_impact_distrib, ) - from ..data.hazard_model_store_test import TestData, mock_hazard_model_store_heat_wbgt @@ -44,7 +44,6 @@ def get_data_requests( self, asset: Asset, *, scenario: str, year: int ) -> Union[HazardDataRequest, Iterable[HazardDataRequest]]: """Request the hazard data needed by the vulnerability model for a specific asset - (this is a Google-style doc string) Args: asset: Asset for which data is requested. @@ -221,68 +220,70 @@ def two_variable_joint_variance(ex, varx, ey, vary): return varx * vary + varx * (ey**2) + vary * (ex**2) -class TestChronicAssetImpact(unittest.TestCase): - """Tests the impact on an asset of a chronic hazard model.""" - - def test_wbgt_vulnerability(self): - store = mock_hazard_model_store_heat_wbgt( - TestData.longitudes, TestData.latitudes - ) - hazard_model = ZarrHazardModel( - source_paths=get_default_source_paths(), store=store +@pytest.mark.parametrize( + "asset_type, expected_value", + [ + ( + "high", + np.array( + [ + 0.00000119194, + 0.00000046573, + 0.00000063758, + 0.00000086889, + 0.00000117871, + 0.00000159172, + 0.00000213966, + 0.00000286314, + 0.00000381379, + 0.00000505696, + 0.00021143251, + 0.00167372506, + 0.00924050344, + 0.03560011430, + 0.09575512509, + 0.17988407024, + 0.23607703667, + 0.21646814108, + 0.13867487025, + 0.06205630207, + 0.02433887116, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + 0.00000000000, + ] + ), ) - # 'chronic_heat/osc/v2/mean_work_loss_high_ACCESS-CM2_historical_2005' 
-        scenario = "ssp585"
-        year = 2050
+        # Add other test cases here
+    ],
+)
+def test_wbgt_vulnerability(asset_type, expected_value):
+    store = mock_hazard_model_store_heat_wbgt(TestData.longitudes, TestData.latitudes)
+    hazard_model = ZarrHazardModel(source_paths=get_default_source_paths(), store=store)
+    # 'chronic_heat/osc/v2/mean_work_loss_high_ACCESS-CM2_historical_2005'

-        vulnerability_models = DictBasedVulnerabilityModels(
-            {IndustrialActivity: [ExampleWBGTGZNJointModel()]}
-        )
+    scenario = "ssp585"
+    year = 2050

-        assets = [
-            IndustrialActivity(lat, lon, type="high")
-            for lon, lat in zip(TestData.longitudes, TestData.latitudes)
-        ][:1]
+    vulnerability_models = DictBasedVulnerabilityModels(
+        {IndustrialActivity: [ExampleWBGTGZNJointModel()]}
+    )

-        results = calculate_impacts(
-            assets, hazard_model, vulnerability_models, scenario=scenario, year=year
-        )
+    assets = [
+        IndustrialActivity(lat, lon, type=asset_type)
+        for lon, lat in zip(TestData.longitudes, TestData.latitudes)
+    ][:1]

-        value_test = list(results.values())[0][0].impact.prob
-
-        value_exp = np.array(
-            [
-                0.00000119194,
-                0.00000046573,
-                0.00000063758,
-                0.00000086889,
-                0.00000117871,
-                0.00000159172,
-                0.00000213966,
-                0.00000286314,
-                0.00000381379,
-                0.00000505696,
-                0.00021143251,
-                0.00167372506,
-                0.00924050344,
-                0.03560011430,
-                0.09575512509,
-                0.17988407024,
-                0.23607703667,
-                0.21646814108,
-                0.13867487025,
-                0.06205630207,
-                0.02433887116,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-                0.00000000000,
-            ]
-        )
-        np.testing.assert_almost_equal(value_test, value_exp, decimal=8)
+    results = calculate_impacts(
+        assets, hazard_model, vulnerability_models, scenario=scenario, year=year
+    )
+
+    value_test = list(results.values())[0][0].impact.prob
+    assert np.allclose(value_test, expected_value, rtol=1e-4)
diff --git a/tests/models/wind_models_test.py b/tests/models/wind_models_test.py
index 
7b7e64b9..3aa1a0d5 100644 --- a/tests/models/wind_models_test.py +++ b/tests/models/wind_models_test.py @@ -1,6 +1,6 @@ import numpy as np +import pytest -import tests.data.hazard_model_store_test as hms from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.hazard_models.core_hazards import ( ResourceSubset, @@ -8,28 +8,77 @@ ) from physrisk.kernel.assets import RealEstateAsset from physrisk.kernel.hazards import Wind +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports from physrisk.kernel.impact import calculate_impacts from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels from physrisk.vulnerability_models.real_estate_models import GenericTropicalCycloneModel +from ..data.hazard_model_store_test import ( + zarr_memory_store, + add_curves, + TestData, + shape_transform_21600_43200, +) -def test_wind_real_estate_model(): +@pytest.fixture +def hazard_model_setup(): scenario = "rcp8p5" year = 2080 - # mock some IRIS data for the calculation: - store, root = hms.zarr_memory_store() - # fmt: off - return_periods = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0] # noqa - intensity = np.array([37.279999, 44.756248, 48.712502, 51.685001, 53.520000, 55.230000, 56.302502, 57.336250, 58.452499, 59.283749, 63.312500, 65.482498, 66.352501, 67.220001, 67.767502, 68.117500, 68.372498, 69.127502, 70.897499 ]) # noqa - # fmt: on - shape, transform = hms.shape_transform_21600_43200(return_periods=return_periods) - path = f"wind/iris/v1/max_speed_{scenario}_{year}".format( - scenario=scenario, year=year + + # Mock some IRIS data for the calculation: + store, root = zarr_memory_store() + return_periods = [ + 10.0, + 20.0, + 30.0, + 40.0, + 50.0, + 60.0, + 70.0, + 80.0, + 90.0, + 100.0, + 200.0, + 300.0, + 400.0, + 500.0, + 600.0, + 700.0, + 800.0, + 900.0, + 1000.0, + ] + intensity = np.array( + [ + 37.279999, + 44.756248, + 
48.712502, + 51.685001, + 53.520000, + 55.230000, + 56.302502, + 57.336250, + 58.452499, + 59.283749, + 63.312500, + 65.482498, + 66.352501, + 67.220001, + 67.767502, + 68.117500, + 68.372498, + 69.127502, + 70.897499, + ] ) - hms.add_curves( + + shape, transform = shape_transform_21600_43200(return_periods=return_periods) + path = f"wind/iris/v1/max_speed_{scenario}_{year}" + + add_curves( root, - hms.TestData.longitudes, - hms.TestData.latitudes, + TestData.longitudes, + TestData.latitudes, path, shape, intensity, @@ -44,21 +93,31 @@ def select_iris_osc( ): return candidates.with_group_id("iris_osc").first() - # specify use of IRIS (OSC contribution) + # Specify use of IRIS (OSC contribution) provider.add_selector(Wind, "max_speed", select_iris_osc) hazard_model = ZarrHazardModel(source_paths=provider.source_paths(), store=store) + + return hazard_model, scenario, year, return_periods, intensity + + +def test_wind_real_estate_model(hazard_model_setup): + hazard_model, scenario, year, return_periods, intensity = hazard_model_setup + assets = [ RealEstateAsset(lat, lon, location="Asia", type="Buildings/Industrial") - for lon, lat in zip(hms.TestData.longitudes[0:1], hms.TestData.latitudes[0:1]) + for lon, lat in zip(TestData.longitudes[0:1], TestData.latitudes[0:1]) ] + vulnerability_models = DictBasedVulnerabilityModels( {RealEstateAsset: [GenericTropicalCycloneModel()]} ) + results = calculate_impacts( assets, hazard_model, vulnerability_models, scenario=scenario, year=year ) - # check calculation + + # Check calculation cum_probs = 1.0 / np.array(return_periods) probs = cum_probs[:-1] - cum_probs[1:] model = GenericTropicalCycloneModel() @@ -70,4 +129,5 @@ def select_iris_osc( impact_distrib = results[(assets[0], Wind, scenario, year)][0].impact mean_impact = impact_distrib.mean_impact() + np.testing.assert_allclose(mean_impact, mean_check) diff --git a/tests/models/wind_turbine_models_test.py b/tests/models/wind_turbine_models_test.py index 6c01e371..a82d579b 
100644 --- a/tests/models/wind_turbine_models_test.py +++ b/tests/models/wind_turbine_models_test.py @@ -1,7 +1,7 @@ import typing -import unittest import numpy as np +import pytest from physrisk.kernel.assets import WindTurbine from physrisk.kernel.events import ( @@ -24,8 +24,10 @@ def get_impact( class WindTurbineModel(SupportsEventImpact[WindTurbine]): """Placeholder wind turbine model to be populated.""" - def prob_collapse(self, turbine: WindTurbine, wind_speed_hub: np.ndarray): - """Calculates probability of turbine collapse for a num ber of events given the wind speed + def prob_collapse( + self, turbine: WindTurbine, wind_speed_hub: np.ndarray + ) -> np.ndarray: + """Calculates probability of turbine collapse for a number of events given the wind speed at the hub per event and characteristics of the turbine. Args: @@ -38,25 +40,22 @@ def prob_collapse(self, turbine: WindTurbine, wind_speed_hub: np.ndarray): # just a placeholder model that returns a probability of 0.3 for all events, regardless of wind speed! return np.ones_like(wind_speed_hub) * 0.3 - def get_impact(self, asset: WindTurbine, event_data: HazardEventDataResponse): + def get_impact( + self, asset: WindTurbine, event_data: HazardEventDataResponse + ) -> MultivariateDistribution: """Returns the probability distributions of fractional damage to the turbine for each event. Args: asset (WindTurbine): Wind turbine asset. - event_data (HazardEventDataResponse): Provides wind speeds for different events with different probabilities. # noqa: E501 - - Raises: - NotImplementedError: Supports only the case of single probability bin: i.e. for each event the wind speed is deterministic. # noqa: E501 + event_data (HazardEventDataResponse): Provides wind speeds for different events with different probabilities. Returns: MultivariateDistribution: Probability distribution of impacts associated with events. 
""" intens = event_data.intensities - # shape is (nb_events, nb_prob_bins) if intens.ndim > 1 and intens.shape[0] != 1: - # only the single probability bin case implemented raise NotImplementedError() - wind_speed = intens.squeeze(axis=0) # vector of wind speeds + wind_speed = intens.squeeze(axis=0) pc = self.prob_collapse(asset, wind_speed) bins_lower = np.array([0.0, 1.0]) bins_upper = np.array([0.0, 1.0]) @@ -64,117 +63,120 @@ def get_impact(self, asset: WindTurbine, event_data: HazardEventDataResponse): return EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs) -class TestWindTurbineModels: - def test_cumulative_probs(self): - """Test calculation of cumulative probability from a combination of lumped probability and uniform probability - density bins. - """ - bins_lower = np.array([2.0, 3.0]) - bins_upper = np.array([2.5, 3.5]) - probs = np.array([[0.5, 0.5], [0.2, 0.8]]) - values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs) - np.testing.assert_almost_equal(values, [2.0, 2.5, 3.0, 3.5]) - np.testing.assert_almost_equal(cum_probs[0, :], [0, 0.5, 0.5, 1.0]) - np.testing.assert_almost_equal(cum_probs[1, :], [0, 0.2, 0.2, 1.0]) - - bins_lower = np.array([2.0]) - bins_upper = np.array([2.0]) - probs = np.array([[1.0], [1.0]]) - values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs) - np.testing.assert_almost_equal(values, [2.0, 2.0]) - np.testing.assert_almost_equal(cum_probs[0, :], [0, 1.0]) - np.testing.assert_almost_equal(cum_probs[1, :], [0, 1.0]) - - bins_lower = np.array([2.0, 2.0]) - bins_upper = np.array([2.0, 3.0]) - probs = np.array([[0.5, 0.5], [0.1, 0.9]]) - values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs) - np.testing.assert_almost_equal(values, [2.0, 2.0, 3.0]) - np.testing.assert_almost_equal(cum_probs[0, :], [0.0, 0.5, 1.0]) - np.testing.assert_almost_equal(cum_probs[1, :], [0, 0.1, 1.0]) - - bins_lower = np.array([2.0, 3.0, 4.0, 5.0, 6.0]) - bins_upper = 
np.array([2.0, 3.0, 5.0, 5.5, 6.0]) - probs = np.array([[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.1, 0.1, 0.1, 0.6]]) - values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs) - np.testing.assert_almost_equal( - values, [2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.5, 6.0, 6.0] - ) - np.testing.assert_almost_equal( - cum_probs[0, :], [0, 0.1, 0.1, 0.3, 0.3, 0.6, 0.8, 0.8, 1.0] - ) - np.testing.assert_almost_equal( - cum_probs[1, :], [0, 0.1, 0.1, 0.2, 0.2, 0.3, 0.4, 0.4, 1.0] - ) - - def test_sampling(self): - """Test sampling from probability distributions comprising two lumped probabilities.""" - bins_lower = np.array([2.0, 3.0]) - bins_upper = np.array([2.0, 3.0]) - probs = np.array([[0.3, 0.7], [0.4, 0.6]]) - pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs) - gen = np.random.Generator(np.random.MT19937(111)) - samples = pdf.inv_cumulative_marginal_probs(gen.random(size=(2, 10000))) - check_2 = np.count_nonzero(samples[0, :] == 2.0) - check_3 = np.count_nonzero(samples[0, :] == 3.0) - assert check_2 == 3062 - assert check_3 == 6938 - check_2 = np.count_nonzero(samples[1, :] == 2.0) - check_3 = np.count_nonzero(samples[1, :] == 3.0) - assert check_2 == 3924 - assert check_3 == 6076 - - @unittest.skip("Performance test: slow.") - def test_performance(self): - nb_events = 10000 - nb_samples = 10 - bins_lower = np.array([0.0, 1.0]) - bins_upper = np.array([0.0, 1.0]) - probs = np.tile(np.array([[0.3, 0.7]]), reps=(nb_events, 1)) - pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs) - gen = np.random.Generator(np.random.MT19937(111)) +@pytest.mark.parametrize( + "bins_lower, bins_upper, probs, expected_values, expected_cum_probs", + [ + ( + np.array([2.0, 3.0]), + np.array([2.5, 3.5]), + np.array([[0.5, 0.5], [0.2, 0.8]]), + [2.0, 2.5, 3.0, 3.5], + [[0, 0.5, 0.5, 1.0], [0, 0.2, 0.2, 1.0]], + ), + ( + np.array([2.0]), + np.array([2.0]), + np.array([[1.0], [1.0]]), + [2.0, 2.0], + [[0, 1.0], [0, 1.0]], + ), + ( + 
np.array([2.0, 2.0]), + np.array([2.0, 3.0]), + np.array([[0.5, 0.5], [0.1, 0.9]]), + [2.0, 2.0, 3.0], + [[0.0, 0.5, 1.0], [0, 0.1, 1.0]], + ), + ( + np.array([2.0, 3.0, 4.0, 5.0, 6.0]), + np.array([2.0, 3.0, 5.0, 5.5, 6.0]), + np.array([[0.1, 0.2, 0.3, 0.2, 0.2], [0.1, 0.1, 0.1, 0.1, 0.6]]), + [2.0, 2.0, 3.0, 3.0, 4.0, 5.0, 5.5, 6.0, 6.0], + [ + [0, 0.1, 0.1, 0.3, 0.3, 0.6, 0.8, 0.8, 1.0], + [0, 0.1, 0.1, 0.2, 0.2, 0.3, 0.4, 0.4, 1.0], + ], + ), + ], +) +def test_cumulative_probs( + bins_lower, bins_upper, probs, expected_values, expected_cum_probs +): + values, cum_probs = calculate_cumulative_probs(bins_lower, bins_upper, probs) + np.testing.assert_almost_equal(values, expected_values) + np.testing.assert_almost_equal(cum_probs[0, :], expected_cum_probs[0]) + np.testing.assert_almost_equal(cum_probs[1, :], expected_cum_probs[1]) + + +def test_sampling(): + """Test sampling from probability distributions comprising two lumped probabilities.""" + bins_lower = np.array([2.0, 3.0]) + bins_upper = np.array([2.0, 3.0]) + probs = np.array([[0.3, 0.7], [0.4, 0.6]]) + pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs) + gen = np.random.Generator(np.random.MT19937(111)) + samples = pdf.inv_cumulative_marginal_probs(gen.random(size=(2, 10000))) + check_2 = np.count_nonzero(samples[0, :] == 2.0) + check_3 = np.count_nonzero(samples[0, :] == 3.0) + assert check_2 == 3062 + assert check_3 == 6938 + check_2 = np.count_nonzero(samples[1, :] == 2.0) + check_3 = np.count_nonzero(samples[1, :] == 3.0) + assert check_2 == 3924 + assert check_3 == 6076 + + +@pytest.mark.performance +def test_performance(): + """Performance test: slow.""" + nb_events = 10000 + nb_samples = 10 + bins_lower = np.array([0.0, 1.0]) + bins_upper = np.array([0.0, 1.0]) + probs = np.tile(np.array([[0.3, 0.7]]), reps=(nb_events, 1)) + pdf = EmpiricalMultivariateDistribution(bins_lower, bins_upper, probs) + gen = np.random.Generator(np.random.MT19937(111)) + for _ in range(1000): uniforms 
= gen.random(size=(nb_events, nb_samples)) samples = pdf.inv_cumulative_marginal_probs(uniforms) - for _i in range(1000): - uniforms = gen.random(size=(nb_events, nb_samples)) - samples = pdf.inv_cumulative_marginal_probs(uniforms) - print(samples) - - def test_rotor_damage_event_based(self): - """Test demonstrating how WindTurbineModel can be used in an event-based calculation.""" - # The data response for a single asset will be a HazardEventDataResponse. - # The format supports a distribution of hazard intensities *per event* - # but here we simply have a single realisation per event. - - # The hazard model is responsible for sourcing the events. Here we provide output from - # that model directly. - - rng = np.random.Generator(np.random.MT19937(111)) - asset1, asset2 = WindTurbine(), WindTurbine() - nb_events = 20 - response_asset1 = HazardEventDataResponse( - np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])) - ) - response_asset2 = HazardEventDataResponse( - np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])) - ) - - turbine_model = WindTurbineModel() - - # provides the impact distributions for each asset - impacts_asset1 = turbine_model.get_impact(asset1, response_asset1) - impacts_asset2 = turbine_model.get_impact(asset2, response_asset2) - - # we sample 10 times for each event for each asset - uniforms = rng.random(size=(nb_events, 10)) - samples_asset1 = impacts_asset1.inv_cumulative_marginal_probs(uniforms) - samples_asset2 = impacts_asset2.inv_cumulative_marginal_probs(uniforms) - - # we can then combine samples and calculate measures... - # for now just sanity-check that we get the approx. 0.3 of total loss in events from placeholder. 
- np.testing.assert_almost_equal( - np.count_nonzero(samples_asset1 == 1.0) / samples_asset1.size, 0.31 - ) - np.testing.assert_almost_equal( - np.count_nonzero(samples_asset2 == 1.0) / samples_asset2.size, 0.31 - ) + print(samples) + + +def test_rotor_damage_event_based(): + """Test demonstrating how WindTurbineModel can be used in an event-based calculation.""" + # The data response for a single asset will be a HazardEventDataResponse. + # The format supports a distribution of hazard intensities *per event* + # but here we simply have a single realisation per event. + + # The hazard model is responsible for sourcing the events. Here we provide output from + # that model directly. + + rng = np.random.Generator(np.random.MT19937(111)) + asset1, asset2 = WindTurbine(), WindTurbine() + nb_events = 20 + response_asset1 = HazardEventDataResponse( + np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])) + ) + response_asset2 = HazardEventDataResponse( + np.array([1.0]), np.array(rng.weibull(a=4, size=[1, nb_events])) + ) + + turbine_model = WindTurbineModel() + + # provides the impact distributions for each asset + impacts_asset1 = turbine_model.get_impact(asset1, response_asset1) + impacts_asset2 = turbine_model.get_impact(asset2, response_asset2) + + # we sample 10 times for each event for each asset + uniforms = rng.random(size=(nb_events, 10)) + samples_asset1 = impacts_asset1.inv_cumulative_marginal_probs(uniforms) + samples_asset2 = impacts_asset2.inv_cumulative_marginal_probs(uniforms) + + # we can then combine samples and calculate measures... + # for now just sanity-check that we get the approx. 0.3 of total loss in events from placeholder. 
+ np.testing.assert_almost_equal( + np.count_nonzero(samples_asset1 == 1.0) / samples_asset1.size, 0.31 + ) + np.testing.assert_almost_equal( + np.count_nonzero(samples_asset2 == 1.0) / samples_asset2.size, 0.31 + ) diff --git a/tests/risk_models/risk_models_stress_test_test.py b/tests/risk_models/risk_models_stress_test_test.py new file mode 100644 index 00000000..895de344 --- /dev/null +++ b/tests/risk_models/risk_models_stress_test_test.py @@ -0,0 +1,214 @@ +from typing import Dict, Sequence + +import numpy as np +import pytest +from dependency_injector import providers + +import physrisk.data.static.world as wd +from physrisk.api.v1.common import UseCaseId +from physrisk.api.v1.impact_req_resp import ( + AssetImpactResponse, + RiskMeasureKey, + RiskMeasuresHelper, +) +from physrisk.container import Container +from physrisk.data.pregenerated_hazard_model import ZarrHazardModel +from physrisk.data.zarr_reader import ZarrReader +from physrisk.hazard_models.core_hazards import get_default_source_paths +from physrisk.kernel import calculation # noqa: F401 ## Avoid circular imports +from physrisk.kernel.assets import ThermalPowerGeneratingAsset +from physrisk.kernel.hazard_model import HazardModelFactory +from physrisk.kernel.hazards import ( + WaterRisk, +) +from physrisk.kernel.risk import AssetLevelRiskModel, MeasureKey +from physrisk.kernel.vulnerability_model import ( + DictBasedVulnerabilityModels, + VulnerabilityModelsFactory, +) +from physrisk.requests import _create_risk_measures +from physrisk.vulnerability_models.thermal_power_generation_models import ( + ThermalPowerGenerationAqueductWaterRiskModel, +) + + +@pytest.fixture +def create_assets(wri_power_plant_assets): + asset_list = wri_power_plant_assets + filtered = asset_list.loc[ + asset_list["primary_fuel"].isin(["Coal", "Gas", "Nuclear", "Oil"]) + ] + filtered = filtered[-60 < filtered["latitude"]] + + longitudes = np.array(filtered["longitude"]) + latitudes = np.array(filtered["latitude"]) + + 
primary_fuels = np.array( + [ + primary_fuel.replace(" and ", "And").replace(" ", "") + for primary_fuel in filtered["primary_fuel"] + ] + ) + + # Capacity describes a maximum electric power rate. + # Generation describes the actual electricity output of the plant over a period of time. + capacities = np.array(filtered["capacity_mw"]) + + countries, continents = wd.get_countries_and_continents( + latitudes=latitudes, longitudes=longitudes + ) + + assets = [] + for latitude, longitude, capacity, primary_fuel, country in zip( + latitudes, + longitudes, + capacities, + primary_fuels, + countries, + ): + if country in ["Spain"]: + assets.append( + ThermalPowerGeneratingAsset( + latitude, + longitude, + type=primary_fuel, + location=country, + capacity=capacity, + ) + ) + + return assets + + +def create_assets_json(assets: Sequence[ThermalPowerGeneratingAsset]): + assets_dict = { + "items": [ + { + "asset_class": type(asset).__name__, + "type": asset.type, + "location": asset.location, + "longitude": asset.longitude, + "latitude": asset.latitude, + } + for asset in assets + ], + } + return assets_dict + + +def test_risk_indicator_model(load_credentials, create_assets): + scenarios = ["ssp585"] + years = [2030] + reader = ZarrReader() + hazard_model = ZarrHazardModel( + source_paths=get_default_source_paths(), reader=reader + ) + assets = create_assets + + vulnerability_models = DictBasedVulnerabilityModels( + {ThermalPowerGeneratingAsset: [ThermalPowerGenerationAqueductWaterRiskModel()]} + ) + + model = AssetLevelRiskModel( + hazard_model=hazard_model, + vulnerability_models=vulnerability_models, + use_case_id=UseCaseId.STRESS_TEST, + ) + + measure_ids_for_asset, definitions = model.populate_measure_definitions(assets) + _, measures = model.calculate_risk_measures( + assets, prosp_scens=scenarios, years=years + ) + + measure = measures[MeasureKey(assets[0], scenarios[0], years[0], WaterRisk)] + score = measure.score + measure_0 = measure.measure_0 + 
np.testing.assert_allclose([measure_0], [0.30079105]) + + risk_measures = _create_risk_measures( + measures, measure_ids_for_asset, definitions, assets, scenarios, years + ) + key = RiskMeasureKey( + hazard_type="WaterRisk", + scenario_id=scenarios[0], + year=str(years[0]), + measure_id=risk_measures.score_based_measure_set_defn.measure_set_id, + ) + item = next(m for m in risk_measures.measures_for_assets if m.key == key) + score2 = item.scores[0] + measure_0_2 = item.measures_0[0] + assert score == score2 + assert measure_0 == measure_0_2 + + helper = RiskMeasuresHelper(risk_measures) + asset_scores, measures, definitions = helper.get_measure( + "WaterRisk", scenarios[0], years[0] + ) + label, description = helper.get_score_details(asset_scores[0], definitions[0]) + assert asset_scores[0] == pytest.approx(0) # TODO check if asserts are correct + + +def test_via_requests(load_credentials, create_assets): + scenarios = ["ssp585"] + years = [2030] + reader = ZarrReader() + hazard_model = ZarrHazardModel( + source_paths=get_default_source_paths(), reader=reader + ) + + assets = create_assets + request_dict = { + "assets": create_assets_json(assets=assets), + "include_asset_level": False, + "include_measures": True, + "include_calc_details": False, + "use_case_id": UseCaseId.STRESS_TEST, + "years": years, + "scenarios": scenarios, + } + + container = Container() + + class TestHazardModelFactory(HazardModelFactory): + def hazard_model( + self, + interpolation: str = "floor", + provider_max_requests: Dict[str, int] = {}, + ): + return hazard_model + + class TestVulnerabilityModelFactory(VulnerabilityModelsFactory): + def vulnerability_models(self): + vulnerability_models = DictBasedVulnerabilityModels( + { + ThermalPowerGeneratingAsset: [ + ThermalPowerGenerationAqueductWaterRiskModel() + ] + } + ) + return vulnerability_models + + container.override_providers( + hazard_model_factory=providers.Factory(TestHazardModelFactory) + ) + + container.override_providers( + 
config=providers.Configuration(default={"zarr_sources": ["embedded"]}) + ) + container.override_providers(inventory_reader=reader) + container.override_providers(zarr_reader=reader) + + container.override_providers( + vulnerability_models_factory=providers.Factory(TestVulnerabilityModelFactory) + ) + + requester = container.requester() + res = requester.get(request_id="get_asset_impact", request_dict=request_dict) + response = AssetImpactResponse.model_validate_json(res) + + res = next( + ma + for ma in response.risk_measures.measures_for_assets + if ma.key.hazard_type == "WaterRisk" + ) + np.testing.assert_allclose(res.measures_0[1], 1.067627) diff --git a/tests/risk_models/risk_models_test.py b/tests/risk_models/risk_models_test.py index fbd05aeb..f7f20753 100644 --- a/tests/risk_models/risk_models_test.py +++ b/tests/risk_models/risk_models_test.py @@ -1,16 +1,18 @@ -"""Test asset impact calculations.""" - from typing import Dict, Sequence import numpy as np +import pytest from dependency_injector import providers +from physrisk.api.v1.common import UseCaseId from physrisk.api.v1.impact_req_resp import ( AssetImpactResponse, RiskMeasureKey, RiskMeasuresHelper, ) from physrisk.container import Container +from physrisk.data.inventory import expand +from physrisk.data.inventory_reader import InventoryReader from physrisk.data.pregenerated_hazard_model import ZarrHazardModel from physrisk.hazard_models.core_hazards import get_default_source_paths from physrisk.kernel.assets import Asset, RealEstateAsset @@ -27,12 +29,12 @@ Wind, ) from physrisk.kernel.risk import AssetLevelRiskModel, MeasureKey -from physrisk.kernel.vulnerability_model import DictBasedVulnerabilityModels +from physrisk.kernel.vulnerability_model import ( + DictBasedVulnerabilityModels, +) from physrisk.requests import _create_risk_measures -from physrisk.risk_models.generic_risk_model import GenericScoreBasedRiskMeasures from physrisk.risk_models.risk_models import RealEstateToyRiskMeasures -from 
..base_test import TestWithCredentials from ..data.hazard_model_store_test import ( TestData, ZarrStoreMocker, @@ -40,322 +42,336 @@ ) -class TestRiskModels(TestWithCredentials): - def test_risk_indicator_model(self): - scenarios = ["rcp8p5"] - years = [2050] - - assets = self._create_assets() - hazard_model = self._create_hazard_model(scenarios, years) - - model = AssetLevelRiskModel( - hazard_model, - DictBasedVulnerabilityModels(get_default_vulnerability_models()), - {RealEstateAsset: RealEstateToyRiskMeasures()}, +@pytest.fixture +def create_assets(): + assets = [ + RealEstateAsset( + TestData.latitudes[0], + TestData.longitudes[0], + location="Asia", + type="Buildings/Industrial", ) - measure_ids_for_asset, definitions = model.populate_measure_definitions(assets) - _, measures = model.calculate_risk_measures( - assets, prosp_scens=scenarios, years=years + for i in range(2) + ] + return assets + + +def create_assets_json(assets: Sequence[RealEstateAsset]): + assets_dict = { + "items": [ + { + "asset_class": type(asset).__name__, + "type": asset.type, + "location": asset.location, + "longitude": asset.longitude, + "latitude": asset.latitude, + "attributes": { + "number_of_storeys": "2", + "structure_type": "concrete", + }, + } + for asset in assets + ], + } + return assets_dict + + +def create_hazard_model(scenarios, years): + source_paths = get_default_source_paths() + + def sp_riverine(scenario, year): + return source_paths[RiverineInundation]( + indicator_id="flood_depth", scenario=scenario, year=year ) - # how to get a score using the MeasureKey - measure = measures[ - MeasureKey(assets[0], scenarios[0], years[0], RiverineInundation) - ] - score = measure.score - measure_0 = measure.measure_0 - np.testing.assert_allclose([measure_0], [0.89306593179]) - - # packing up the risk measures, e.g. 
for JSON transmission: - risk_measures = _create_risk_measures( - measures, measure_ids_for_asset, definitions, assets, scenarios, years - ) - # we still have a key, but no asset: - key = RiskMeasureKey( - hazard_type="RiverineInundation", - scenario_id=scenarios[0], - year=str(years[0]), - measure_id=risk_measures.score_based_measure_set_defn.measure_set_id, - ) - item = next(m for m in risk_measures.measures_for_assets if m.key == key) - score2 = item.scores[0] - measure_0_2 = item.measures_0[0] - assert score == score2 - assert measure_0 == measure_0_2 - - helper = RiskMeasuresHelper(risk_measures) - asset_scores, measures, definitions = helper.get_measure( - "CoastalInundation", scenarios[0], years[0] - ) - label, description = helper.get_score_details(asset_scores[0], definitions[0]) - assert asset_scores[0] == 4 - - def _create_assets(self): - assets = [ - RealEstateAsset( - TestData.latitudes[0], - TestData.longitudes[0], - location="Asia", - type="Buildings/Industrial", - ) - for i in range(2) - ] - return assets - - def _create_assets_json(self, assets: Sequence[RealEstateAsset]): - assets_dict = { - "items": [ - { - "asset_class": type(asset).__name__, - "type": asset.type, - "location": asset.location, - "longitude": asset.longitude, - "latitude": asset.latitude, - "attributes": { - "number_of_storeys": "2", - "structure_type": "concrete", - }, - } - for asset in assets - ], - } - return assets_dict - - def _create_hazard_model(self, scenarios, years): - source_paths = get_default_source_paths() - - def sp_riverine(scenario, year): - return source_paths[RiverineInundation]( - indicator_id="flood_depth", scenario=scenario, year=year - ) - - def sp_coastal(scenario, year): - return source_paths[CoastalInundation]( - indicator_id="flood_depth", scenario=scenario, year=year - ) - - def sp_wind(scenario, year): - return source_paths[Wind]( - indicator_id="max_speed", scenario=scenario, year=year - ) - - def sp_heat(scenario, year): - return 
source_paths[ChronicHeat]( - indicator_id="days/above/35c", scenario=scenario, year=year - ) - - def sp_fire(scenario, year): - return source_paths[Fire]( - indicator_id="fire_probability", scenario=scenario, year=year - ) - - def sp_hail(scenario, year): - return source_paths[Hail]( - indicator_id="days/above/5cm", scenario=scenario, year=year - ) - - def sp_drought(scenario, year): - return source_paths[Drought]( - indicator_id="months/spei3m/below/-2", scenario=scenario, year=year - ) - - def sp_precipitation(scenario, year): - return source_paths[Precipitation]( - indicator_id="max/daily/water_equivalent", scenario=scenario, year=year - ) - - mocker = ZarrStoreMocker() - return_periods = inundation_return_periods() - flood_histo_curve = np.array( - [0.0596, 0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163] - ) - flood_projected_curve = np.array( - [0.0596, 0.333, 0.605, 0.915, 1.164, 1.503, 1.649, 1.763, 1.963] + def sp_coastal(scenario, year): + return source_paths[CoastalInundation]( + indicator_id="flood_depth", scenario=scenario, year=year ) - for path in [sp_riverine("historical", 1980), sp_coastal("historical", 1980)]: - mocker.add_curves_global( - path, - TestData.longitudes, - TestData.latitudes, - return_periods, - flood_histo_curve, - ) - - for path in [sp_riverine("rcp8p5", 2050), sp_coastal("rcp8p5", 2050)]: - mocker.add_curves_global( - path, - TestData.longitudes, - TestData.latitudes, - return_periods, - flood_projected_curve, - ) - - mocker.add_curves_global( - sp_wind("historical", -1), - TestData.longitudes, - TestData.latitudes, - TestData.wind_return_periods, - TestData.wind_intensities_1, - units="m/s", - ) - mocker.add_curves_global( - sp_wind("rcp8p5", 2050), - TestData.longitudes, - TestData.latitudes, - TestData.wind_return_periods, - TestData.wind_intensities_2, - units="m/s", - ) - mocker.add_curves_global( - sp_heat("historical", -1), - TestData.longitudes, - TestData.latitudes, - TestData.temperature_thresholds, - 
TestData.degree_days_above_index_1, + def sp_wind(scenario, year): + return source_paths[Wind]( + indicator_id="max_speed", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_heat("rcp8p5", 2050), - TestData.longitudes, - TestData.latitudes, - TestData.temperature_thresholds, - TestData.degree_days_above_index_2, - ) - mocker.add_curves_global( - sp_fire("historical", -1), - TestData.longitudes, - TestData.latitudes, - [0], - [0.15], + + def sp_heat(scenario, year): + return source_paths[ChronicHeat]( + indicator_id="days/above/35c", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_fire("rcp8p5", 2050), - TestData.longitudes, - TestData.latitudes, - [0], - [0.2], + + def sp_heat_mean_degrees(scenario, year): + return source_paths[ChronicHeat]( + indicator_id="mean_degree_days/above/index", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_hail("historical", -1), - TestData.longitudes, - TestData.latitudes, - [0], - [2.15], + + def sp_fire(scenario, year): + return source_paths[Fire]( + indicator_id="fire_probability", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_hail("rcp8p5", 2050), - TestData.longitudes, - TestData.latitudes, - [0], - [4], + + def sp_hail(scenario, year): + return source_paths[Hail]( + indicator_id="days/above/5cm", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_drought("historical", -1), - TestData.longitudes, - TestData.latitudes, - [0], - [3], + + def sp_drought(scenario, year): + return source_paths[Drought]( + indicator_id="months/spei3m/below/-2", scenario=scenario, year=year ) - mocker.add_curves_global( - sp_drought("rcp8p5", 2050), - TestData.longitudes, - TestData.latitudes, - [0], - [9], + + def sp_precipitation(scenario, year): + return source_paths[Precipitation]( + indicator_id="max/daily/water_equivalent", scenario=scenario, year=year ) + + mocker = ZarrStoreMocker() + return_periods = inundation_return_periods() + flood_histo_curve = np.array( + [0.0596, 
0.333, 0.505, 0.715, 0.864, 1.003, 1.149, 1.163, 1.163] + ) + flood_projected_curve = np.array( + [0.0596, 0.333, 0.605, 0.915, 1.164, 1.503, 1.649, 1.763, 1.963] + ) + + for path in [sp_riverine("historical", 1980), sp_coastal("historical", 1980)]: mocker.add_curves_global( - sp_precipitation("historical", -1), + path, TestData.longitudes, TestData.latitudes, - [0], - [10], + return_periods, + flood_histo_curve, ) + + for path in [sp_riverine("rcp8p5", 2050), sp_coastal("rcp8p5", 2050)]: mocker.add_curves_global( - sp_precipitation("rcp8p5", 2050), + path, TestData.longitudes, TestData.latitudes, - [0], - [70], - ) - - return ZarrHazardModel( - source_paths=get_default_source_paths(), store=mocker.store + return_periods, + flood_projected_curve, ) - def test_via_requests(self): - scenarios = ["ssp585"] - years = [2050] - - assets = self._create_assets() - # hazard_model = ZarrHazardModel(source_paths=get_default_source_paths()) - hazard_model = self._create_hazard_model(scenarios, years) - - request_dict = { - "assets": self._create_assets_json(assets), - "include_asset_level": False, - "include_measures": True, - "include_calc_details": False, - "years": years, - "scenarios": scenarios, - } - - # request = requests.AssetImpactRequest(**request_dict) - - container = Container() - - class TestHazardModelFactory(HazardModelFactory): - def hazard_model( - self, - interpolation: str = "floor", - provider_max_requests: Dict[str, int] = ..., - ): - return hazard_model - - container.override_providers( - hazard_model_factory=providers.Factory(TestHazardModelFactory) - ) - container.override_providers( - config=providers.Configuration(default={"zarr_sources": ["embedded"]}) - ) - container.override_providers(inventory_reader=None) - container.override_providers(zarr_reader=None) - - requester = container.requester() - res = requester.get(request_id="get_asset_impact", request_dict=request_dict) - response = AssetImpactResponse.model_validate_json(res) - - # response = 
requests._get_asset_impacts( - # request, - # hazard_model, - # vulnerability_models=DictBasedVulnerabilityModels(get_default_vulnerability_models()), - # ) - res = next( - ma - for ma in response.risk_measures.measures_for_assets - if ma.key.hazard_type == "RiverineInundation" - ) - np.testing.assert_allclose(res.measures_0, [0.89306593179, 0.89306593179]) - # json_str = json.dumps(response.model_dump(), cls=NumpyArrayEncoder) - - def test_generic_model(self): - scenarios = ["rcp8p5"] - years = [2050] - - assets = [ - Asset(TestData.latitudes[0], TestData.longitudes[0]) for i in range(2) - ] - hazard_model = self._create_hazard_model(scenarios, years) - - model = AssetLevelRiskModel( - hazard_model, - DictBasedVulnerabilityModels(get_default_vulnerability_models()), - {Asset: GenericScoreBasedRiskMeasures()}, - ) - measure_ids_for_asset, definitions = model.populate_measure_definitions(assets) - _, measures = model.calculate_risk_measures( - assets, prosp_scens=scenarios, years=years - ) - np.testing.assert_approx_equal( - measures[MeasureKey(assets[0], scenarios[0], years[0], Wind)].measure_0, - 214.01549835205077, - ) + mocker.add_curves_global( + sp_wind("historical", -1), + TestData.longitudes, + TestData.latitudes, + TestData.wind_return_periods, + TestData.wind_intensities_1, + units="m/s", + ) + mocker.add_curves_global( + sp_wind("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + TestData.wind_return_periods, + TestData.wind_intensities_2, + units="m/s", + ) + mocker.add_curves_global( + sp_heat("historical", -1), + TestData.longitudes, + TestData.latitudes, + TestData.temperature_thresholds, + TestData.degree_days_above_index_1, + ) + mocker.add_curves_global( + sp_heat("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + TestData.temperature_thresholds, + TestData.degree_days_above_index_2, + ) + mocker.add_curves_global( + sp_heat_mean_degrees("historical", -1), + TestData.longitudes, + TestData.latitudes, + 
TestData.temperature_thresholds, + TestData.degree_days_above_index_1, + ) + mocker.add_curves_global( + sp_heat_mean_degrees("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + TestData.temperature_thresholds, + TestData.degree_days_above_index_2, + ) + mocker.add_curves_global( + sp_fire("historical", -1), + TestData.longitudes, + TestData.latitudes, + [0], + [0.15], + ) + mocker.add_curves_global( + sp_fire("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + [0], + [0.2], + ) + mocker.add_curves_global( + sp_hail("historical", -1), + TestData.longitudes, + TestData.latitudes, + [0], + [2.15], + ) + mocker.add_curves_global( + sp_hail("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + [0], + [4], + ) + mocker.add_curves_global( + sp_drought("historical", -1), + TestData.longitudes, + TestData.latitudes, + [0], + [3], + ) + mocker.add_curves_global( + sp_drought("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + [0], + [9], + ) + mocker.add_curves_global( + sp_precipitation("historical", -1), + TestData.longitudes, + TestData.latitudes, + [0], + [10], + ) + mocker.add_curves_global( + sp_precipitation("rcp8p5", 2050), + TestData.longitudes, + TestData.latitudes, + [0], + [70], + ) + + return ZarrHazardModel(source_paths=get_default_source_paths(), store=mocker.store) + + +@pytest.fixture +def hazard_model(): + scenarios = ["rcp8p5"] + years = [2050] + return create_hazard_model(scenarios, years) + + +def test_risk_indicator_model(hazard_model, create_assets): + scenarios = ["rcp8p5"] + years = [2050] + + assets = create_assets + + model = AssetLevelRiskModel( + hazard_model, + vulnerability_models=None, + measure_calculators={RealEstateAsset: RealEstateToyRiskMeasures()}, + use_case_id=UseCaseId.DEFAULT, + ) + measure_ids_for_asset, definitions = model.populate_measure_definitions(assets) + _, measures = model.calculate_risk_measures( + assets, prosp_scens=scenarios, years=years + ) + + measure = measures[ + 
MeasureKey(assets[0], scenarios[0], years[0], RiverineInundation) + ] + score = measure.score + measure_0 = measure.measure_0 + np.testing.assert_allclose([measure_0], [0.89306593179]) + + risk_measures = _create_risk_measures( + measures, measure_ids_for_asset, definitions, assets, scenarios, years + ) + key = RiskMeasureKey( + hazard_type="RiverineInundation", + scenario_id=scenarios[0], + year=str(years[0]), + measure_id=risk_measures.score_based_measure_set_defn.measure_set_id, + ) + item = next(m for m in risk_measures.measures_for_assets if m.key == key) + score2 = item.scores[0] + measure_0_2 = item.measures_0[0] + assert score == score2 + assert measure_0 == measure_0_2 + + helper = RiskMeasuresHelper(risk_measures) + asset_scores, measures, definitions = helper.get_measure( + "ChronicHeat", scenarios[0], years[0] + ) + label, description = helper.get_score_details(asset_scores[0], definitions[0]) + assert asset_scores[0] == 4 + + +def test_via_requests(hazard_model, create_assets): + scenarios = ["rcp8p5"] + years = [2050] + + assets = create_assets + request_dict = { + "assets": create_assets_json(assets), + "include_asset_level": False, + "include_measures": True, + "include_calc_details": False, + "years": years, + "scenarios": scenarios, + } + + container = Container() + + class TestHazardModelFactory(HazardModelFactory): + def hazard_model( + self, + interpolation: str = "floor", + provider_max_requests: Dict[str, int] = {}, + ): + return hazard_model + + container.override_providers( + hazard_model_factory=providers.Factory(TestHazardModelFactory) + ) + container.override_providers( + config=providers.Configuration(default={"zarr_sources": ["embedded"]}) + ) + container.override_providers(inventory_reader=None) + container.override_providers(zarr_reader=None) + + requester = container.requester() + res = requester.get(request_id="get_asset_impact", request_dict=request_dict) + response = AssetImpactResponse.model_validate_json(res) + + res = next( + 
ma + for ma in response.risk_measures.measures_for_assets + if ma.key.hazard_type == "RiverineInundation" + ) + np.testing.assert_allclose(res.measures_0, [0.89306593179, 0.89306593179]) + + +def test_generic_model(hazard_model): + scenarios = ["rcp8p5"] + years = [2050] + + assets = [Asset(TestData.latitudes[0], TestData.longitudes[0]) for i in range(2)] + + model = AssetLevelRiskModel( + hazard_model=hazard_model, + vulnerability_models=DictBasedVulnerabilityModels( + get_default_vulnerability_models() + ), + use_case_id=UseCaseId.GENERIC, + ) + measure_ids_for_asset, definitions = model.populate_measure_definitions(assets) + _, measures = model.calculate_risk_measures( + assets, prosp_scens=scenarios, years=years + ) + np.testing.assert_approx_equal( + measures[MeasureKey(assets[0], scenarios[0], years[0], Wind)].measure_0, + 214.01549835205077, + )