From 7cb72ebfe56d624195af68a87c797167067ed48b Mon Sep 17 00:00:00 2001
From: "Casey D. Burleyson" <31452682+cdburley@users.noreply.github.com>
Date: Fri, 24 May 2024 10:23:21 -0700
Subject: [PATCH 1/5] Update execute_forward.py
Adjusted the input function to account for the new GCAM-USA data format.
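As a rough illustration of the change below: instead of filtering one aggregated file (gcamDataTable_aggParam.csv) by scenario and parameter, the function now builds a per-scenario filename inside the GCAM-USA input directory and reads that file directly. A minimal sketch of the new lookup, mirroring the 'historic' fallback in the diff; the helper name build_gcam_usa_filename and the example path are introduced here only for illustration:

    import os
    import pandas as pd

    def build_gcam_usa_filename(scenario_to_process: str, gcam_usa_input_dir: str) -> str:
        # Historical runs are mapped onto the 'rcp45cooler_ssp3' GCAM-USA run:
        if scenario_to_process == 'historic':
            scenario_to_process_gcam = 'rcp45cooler_ssp3'
        else:
            scenario_to_process_gcam = scenario_to_process
        # One 'electricity_demand_<scenario>.csv' file per scenario in the new format:
        return os.path.join(gcam_usa_input_dir, 'electricity_demand_' + scenario_to_process_gcam + '.csv')

    # Hypothetical usage:
    # filename = build_gcam_usa_filename('rcp85hotter_ssp5', '/path/to/gcam_usa_data')
    # gcam_usa_df = pd.read_csv(filename, index_col=None, header=0)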
---
tell/execute_forward.py | 92 ++++++++++++++++++++++++++++++++++-------
1 file changed, 76 insertions(+), 16 deletions(-)
diff --git a/tell/execute_forward.py b/tell/execute_forward.py
index fc0989c..dfbbf8b 100644
--- a/tell/execute_forward.py
+++ b/tell/execute_forward.py
@@ -9,39 +9,99 @@
from scipy import interpolate
from .states_fips_function import state_metadata_from_state_abbreviation
-def extract_gcam_usa_loads(scenario_to_process: str, filename: str) -> DataFrame:
+# def extract_gcam_usa_loads(scenario_to_process: str, filename: str) -> DataFrame:
+# """Extracts the state-level annual loads from a GCAM-USA output file.
+#
+# :param scenario_to_process: Scenario to process
+# :type scenario_to_process: str
+#
+# :param filename: Name of the GCAM-USA output file
+# :type filename: str
+#
+# :return: DataFrame of state-level annual total electricity loads
+#
+# """
+#
+# # Load in the raw GCAM-USA output file:
+# gcam_usa_df = pd.read_csv(filename, index_col=None, header=0)
+#
+# # Cluge the scenario for historical runs:
+# if scenario_to_process == 'historic':
+# scenario_to_process_gcam = 'rcp45cooler_ssp3'
+# else:
+# scenario_to_process_gcam = scenario_to_process
+#
+# # Subset the data to only the scenario you want to process:
+# gcam_usa_df = gcam_usa_df[gcam_usa_df['scenario'].isin([scenario_to_process_gcam])]
+#
+# # Subset the data to only the total annual consumption of electricity by state:
+# gcam_usa_df = gcam_usa_df[gcam_usa_df['param'].isin(['elecFinalBySecTWh'])]
+#
+# # Make a list of all of the states in the "gcam_usa_df":
+# states = gcam_usa_df['subRegion'].unique()
+#
+# # Loop over the states and interpolate their loads to an annual time step:
+# for i in range(len(states)):
+#
+# # Subset to just the data for the state being processed:
+# subset_df = gcam_usa_df[gcam_usa_df['subRegion'].isin([states[i]])].copy()
+#
+# # Retrieve the state metadata:
+# (state_fips, state_name) = state_metadata_from_state_abbreviation(states[i])
+#
+# # Linearly interpolate the 5-year loads from GCAM-USA to an annual time step:
+# annual_time_vector = pd.Series(range(subset_df['x'].min(), subset_df['x'].max()))
+# interpolation_function = interpolate.interp1d(subset_df['x'], subset_df['value'], kind='linear')
+# annual_loads = interpolation_function(annual_time_vector)
+#
+# # Create an empty dataframe and store the results:
+# state_df = pd.DataFrame()
+# state_df['Year'] = annual_time_vector.tolist()
+# state_df['GCAM_USA_State_Annual_Load_TWh'] = annual_loads
+# state_df['State_FIPS'] = state_fips
+# state_df['State_Name'] = state_name
+# state_df['State_Abbreviation'] = states[i]
+#
+# # Aggregate the output into a new dataframe:
+# if i == 0:
+# gcam_usa_output_df = state_df
+# else:
+# gcam_usa_output_df = pd.concat([gcam_usa_output_df, state_df])
+#
+# return gcam_usa_output_df
+
+
+def extract_gcam_usa_loads(scenario_to_process: str, gcam_usa_input_dir:str) -> DataFrame:
"""Extracts the state-level annual loads from a GCAM-USA output file.
:param scenario_to_process: Scenario to process
:type scenario_to_process: str
- :param filename: Name of the GCAM-USA output file
- :type filename: str
+ :param gcam_usa_input_dir: Path to where the GCAM-USA data are stored
+ :type gcam_usa_input_dir: str
:return: DataFrame of state-level annual total electricity loads
"""
- # Load in the raw GCAM-USA output file:
- gcam_usa_df = pd.read_csv(filename, index_col=None, header=0)
-
# Cluge the scenario for historical runs:
if scenario_to_process == 'historic':
- scenario_to_process_gcam = 'rcp45cooler_ssp3'
+ scenario_to_process_gcam = 'rcp45cooler_ssp3'
else:
- scenario_to_process_gcam = scenario_to_process
+ scenario_to_process_gcam = scenario_to_process
- # Subset the data to only the scenario you want to process:
- gcam_usa_df = gcam_usa_df[gcam_usa_df['scenario'].isin([scenario_to_process_gcam])]
+ # Create the filename for the needed GCAM run:
+ filename = (os.path.join(gcam_usa_input_dir, ('electricity_demand_' + scenario_to_process_gcam + '.csv')))
- # Subset the data to only the total annual consumption of electricity by state:
- gcam_usa_df = gcam_usa_df[gcam_usa_df['param'].isin(['elecFinalBySecTWh'])]
+ # Load in the raw GCAM-USA output file:
+ gcam_usa_df = pd.read_csv(filename, index_col=None, header=0)
# Make a list of all of the states in the "gcam_usa_df":
states = gcam_usa_df['subRegion'].unique()
# Loop over the states and interpolate their loads to an annual time step:
for i in range(len(states)):
+ # for i in range(1):
# Subset to just the data for the state being processed:
subset_df = gcam_usa_df[gcam_usa_df['subRegion'].isin([states[i]])].copy()
@@ -475,7 +535,7 @@ def output_tell_county_data(joint_mlp_df: DataFrame, year_to_process: str, gcam_
state_name = state_name.replace(",", "_")
csv_output_filename = os.path.join(
- data_output_dir + '/County_Level_Data/TELL_' + state_name + '_' + county_name + '_Hourly_Load_Data_' +
+ data_output_dir + '/County_Level_Data/' + year_to_process + '/TELL_' + state_name + '_' + county_name + '_Hourly_Load_Data_' +
year_to_process + '_Scaled_' + gcam_target_year + '.csv')
# Write out the dataframe to a .csv file:
@@ -537,12 +597,12 @@ def execute_forward(year_to_process: str, gcam_target_year: str, scenario_to_pro
if os.path.exists(data_output_dir_full) is False:
os.makedirs(data_output_dir_full)
if save_county_data:
- if os.path.exists(os.path.join(data_output_dir_full, 'County_Level_Data')) is False:
- os.mkdir(os.path.join(data_output_dir_full, 'County_Level_Data'))
+ if os.path.exists(os.path.join(data_output_dir_full, 'County_Level_Data', year_to_process)) is False:
+ os.mkdir(os.path.join(data_output_dir_full, 'County_Level_Data', year_to_process))
# Load in the sample GCAM-USA output file and subset the data to only the "year_to_process":
gcam_usa_df = extract_gcam_usa_loads(scenario_to_process = scenario_to_process,
- filename = (os.path.join(gcam_usa_input_dir, 'gcamDataTable_aggParam.csv')))
+ gcam_usa_input_dir = gcam_usa_input_dir)
gcam_usa_df = gcam_usa_df[gcam_usa_df['Year'] == int(gcam_target_year)]
# Load in the most recent (i.e., 2019) BA service territory mapping file:
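For reference, with this patch the county-level output files land in a year-specific subdirectory of County_Level_Data. A small sketch of the resulting path; the directory, state, and county values below are hypothetical examples, and os.path.join is used here in place of the string concatenation shown in the hunk above:

    import os

    # Hypothetical example values:
    data_output_dir = os.path.join('outputs', 'rcp85hotter_ssp5', '2050')
    year_to_process, gcam_target_year = '2039', '2050'
    state_name, county_name = 'Texas', 'Travis'

    # County-level files are now written to a per-year subdirectory:
    csv_output_filename = os.path.join(
        data_output_dir, 'County_Level_Data', year_to_process,
        'TELL_' + state_name + '_' + county_name + '_Hourly_Load_Data_' +
        year_to_process + '_Scaled_' + gcam_target_year + '.csv')

    print(csv_output_filename)
    # e.g. outputs/rcp85hotter_ssp5/2050/County_Level_Data/2039/TELL_Texas_Travis_Hourly_Load_Data_2039_Scaled_2050.csv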
From 420b742ef41efd642e8012f21de9b6e064032213 Mon Sep 17 00:00:00 2001
From: "Casey D. Burleyson" <31452682+cdburley@users.noreply.github.com>
Date: Mon, 19 Aug 2024 11:18:24 -0700
Subject: [PATCH 2/5] Updated package data
---
notebooks/tell_quickstarter.ipynb | 749 +++++++++++++++++++++++++++++-
requirements.txt | 1 -
setup.py | 4 +-
tell/data_process_population.py | 3 +-
tell/execute_forward.py | 65 +--
tell/install_forcing_data.py | 6 +-
tell/install_quickstarter_data.py | 6 +-
7 files changed, 748 insertions(+), 86 deletions(-)
diff --git a/notebooks/tell_quickstarter.ipynb b/notebooks/tell_quickstarter.ipynb
index ea1fc7a..c5528ae 100644
--- a/notebooks/tell_quickstarter.ipynb
+++ b/notebooks/tell_quickstarter.ipynb
@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
"id": "86db683a-70a2-4f89-a1d6-4c6d5f180272",
"metadata": {
"tags": []
@@ -48,7 +48,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 2,
"id": "f7cc0f66-ba83-47d4-b161-0b1ad076064c",
"metadata": {
"tags": []
@@ -71,10 +71,19 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
"id": "66df663b-0fa6-4c00-b3d0-640275c28ad4",
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Downloading the quickstarter data package for tell version 1.1.0...\n",
+ "Done!\n"
+ ]
+ }
+ ],
"source": [
"# Download the TELL quickstarter data package from Zenodo:\n",
"tell.install_quickstarter_data(data_dir = tell_data_dir)\n"
@@ -126,12 +135,182 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 4,
"id": "ccddc4b8",
"metadata": {
"tags": []
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " datetime | \n",
+ " predictions | \n",
+ " ground_truth | \n",
+ " region | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 2019-01-01 00:00:00 | \n",
+ " 88979.657613 | \n",
+ " 94016.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 2019-01-01 01:00:00 | \n",
+ " 85737.057386 | \n",
+ " 90385.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 2019-01-01 02:00:00 | \n",
+ " 82919.012268 | \n",
+ " 86724.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 2019-01-01 03:00:00 | \n",
+ " 79846.214990 | \n",
+ " 82978.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 2019-01-01 04:00:00 | \n",
+ " 76893.084059 | \n",
+ " 79536.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 2019-01-01 05:00:00 | \n",
+ " 73983.056433 | \n",
+ " 76608.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 2019-01-01 06:00:00 | \n",
+ " 71563.065721 | \n",
+ " 73926.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 2019-01-01 07:00:00 | \n",
+ " 69222.882840 | \n",
+ " 72062.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 2019-01-01 08:00:00 | \n",
+ " 68783.484984 | \n",
+ " 70756.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ " 9 | \n",
+ " 2019-01-01 09:00:00 | \n",
+ " 69488.921828 | \n",
+ " 69515.0 | \n",
+ " PJM | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " datetime predictions ground_truth region\n",
+ "0 2019-01-01 00:00:00 88979.657613 94016.0 PJM\n",
+ "1 2019-01-01 01:00:00 85737.057386 90385.0 PJM\n",
+ "2 2019-01-01 02:00:00 82919.012268 86724.0 PJM\n",
+ "3 2019-01-01 03:00:00 79846.214990 82978.0 PJM\n",
+ "4 2019-01-01 04:00:00 76893.084059 79536.0 PJM\n",
+ "5 2019-01-01 05:00:00 73983.056433 76608.0 PJM\n",
+ "6 2019-01-01 06:00:00 71563.065721 73926.0 PJM\n",
+ "7 2019-01-01 07:00:00 69222.882840 72062.0 PJM\n",
+ "8 2019-01-01 08:00:00 68783.484984 70756.0 PJM\n",
+ "9 2019-01-01 09:00:00 69488.921828 69515.0 PJM"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " BA | \n",
+ " RMS_ABS | \n",
+ " RMS_NORM | \n",
+ " MAPE | \n",
+ " R2 | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " PJM | \n",
+ " 4384.157491 | \n",
+ " 0.047989 | \n",
+ " 0.036301 | \n",
+ " 0.923456 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " BA RMS_ABS RMS_NORM MAPE R2\n",
+ "0 PJM 4384.157491 0.047989 0.036301 0.923456"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"# Run the MLP training step for a single BA (i.e., \"region\"):\n",
"prediction_df, validation_df = tell.train(region = 'PJM',\n",
@@ -154,12 +333,540 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 5,
"id": "b87a1a00-7b3b-4e66-8eec-8828d91b6a53",
"metadata": {
"tags": []
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " BA | \n",
+ " RMS_ABS | \n",
+ " RMS_NORM | \n",
+ " MAPE | \n",
+ " R2 | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " AEC | \n",
+ " 43.182333 | \n",
+ " 0.080527 | \n",
+ " 0.060206 | \n",
+ " 0.895177 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " AECI | \n",
+ " 349.811003 | \n",
+ " 0.130529 | \n",
+ " 0.133922 | \n",
+ " 0.652215 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " AVA | \n",
+ " 162.161834 | \n",
+ " 0.110138 | \n",
+ " 0.108262 | \n",
+ " 0.681471 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " AZPS | \n",
+ " 226.108856 | \n",
+ " 0.066803 | \n",
+ " 0.052413 | \n",
+ " 0.948951 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " BANC | \n",
+ " 119.132254 | \n",
+ " 0.062582 | \n",
+ " 0.045901 | \n",
+ " 0.918882 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " BPAT | \n",
+ " 289.193302 | \n",
+ " 0.045355 | \n",
+ " 0.034236 | \n",
+ " 0.902667 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " CHPD | \n",
+ " 28.805568 | \n",
+ " 0.136962 | \n",
+ " 0.084884 | \n",
+ " 0.758776 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " CISO | \n",
+ " 2144.139480 | \n",
+ " 0.087673 | \n",
+ " 0.064678 | \n",
+ " 0.775649 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " CPLE | \n",
+ " 511.961396 | \n",
+ " 0.072310 | \n",
+ " 0.055798 | \n",
+ " 0.894201 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " DOPD | \n",
+ " 29.164507 | \n",
+ " 0.141334 | \n",
+ " 0.142977 | \n",
+ " 0.475714 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " DUK | \n",
+ " 759.594469 | \n",
+ " 0.062310 | \n",
+ " 0.047458 | \n",
+ " 0.900849 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " EPE | \n",
+ " 68.553399 | \n",
+ " 0.070344 | \n",
+ " 0.047648 | \n",
+ " 0.930497 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " ERCO | \n",
+ " 2530.005379 | \n",
+ " 0.057767 | \n",
+ " 0.049336 | \n",
+ " 0.927076 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " FMPP | \n",
+ " 138.996269 | \n",
+ " 0.068148 | \n",
+ " 0.047540 | \n",
+ " 0.926753 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " FPC | \n",
+ " 426.639079 | \n",
+ " 0.068521 | \n",
+ " 0.051097 | \n",
+ " 0.940649 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " FPL | \n",
+ " 862.808712 | \n",
+ " 0.058933 | \n",
+ " 0.045441 | \n",
+ " 0.943773 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " GCPD | \n",
+ " 55.523529 | \n",
+ " 0.090685 | \n",
+ " 0.085862 | \n",
+ " 0.187028 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " GVL | \n",
+ " 20.126424 | \n",
+ " 0.083891 | \n",
+ " 0.059205 | \n",
+ " 0.903672 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " HST | \n",
+ " 5.304804 | \n",
+ " 0.079155 | \n",
+ " 0.059759 | \n",
+ " 0.902074 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " IID | \n",
+ " 33.647281 | \n",
+ " 0.080587 | \n",
+ " 0.067286 | \n",
+ " 0.960689 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " IPCO | \n",
+ " 145.893033 | \n",
+ " 0.075232 | \n",
+ " 0.055792 | \n",
+ " 0.870300 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " ISNE | \n",
+ " 864.147603 | \n",
+ " 0.063998 | \n",
+ " 0.048545 | \n",
+ " 0.867955 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " JEA | \n",
+ " 113.481811 | \n",
+ " 0.075732 | \n",
+ " 0.056203 | \n",
+ " 0.900466 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " LDWP | \n",
+ " 254.061359 | \n",
+ " 0.083065 | \n",
+ " 0.061816 | \n",
+ " 0.853437 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " LGEE | \n",
+ " 319.325996 | \n",
+ " 0.080571 | \n",
+ " 0.061461 | \n",
+ " 0.843170 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " MISO | \n",
+ " 3947.497097 | \n",
+ " 0.053240 | \n",
+ " 0.039292 | \n",
+ " 0.878299 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " NEVP | \n",
+ " 181.396846 | \n",
+ " 0.042907 | \n",
+ " 0.031114 | \n",
+ " 0.965330 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " NSB | \n",
+ " 4.724918 | \n",
+ " 0.093143 | \n",
+ " 0.069128 | \n",
+ " 0.901988 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " NWMT | \n",
+ " 159.519694 | \n",
+ " 0.110718 | \n",
+ " 0.116043 | \n",
+ " 0.046473 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " NYIS | \n",
+ " 929.351301 | \n",
+ " 0.052242 | \n",
+ " 0.038412 | \n",
+ " 0.916865 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PACE | \n",
+ " 289.948624 | \n",
+ " 0.052148 | \n",
+ " 0.040129 | \n",
+ " 0.820101 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PACW | \n",
+ " 148.109105 | \n",
+ " 0.062658 | \n",
+ " 0.048284 | \n",
+ " 0.853165 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PGE | \n",
+ " 125.559460 | \n",
+ " 0.053611 | \n",
+ " 0.040305 | \n",
+ " 0.895974 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PJM | \n",
+ " 4384.157491 | \n",
+ " 0.047989 | \n",
+ " 0.036301 | \n",
+ " 0.923456 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PNM | \n",
+ " 74.255328 | \n",
+ " 0.046536 | \n",
+ " 0.035851 | \n",
+ " 0.910086 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PSCO | \n",
+ " 573.318593 | \n",
+ " 0.104979 | \n",
+ " 0.093124 | \n",
+ " 0.500939 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " PSEI | \n",
+ " 193.784724 | \n",
+ " 0.057435 | \n",
+ " 0.044362 | \n",
+ " 0.896085 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SC | \n",
+ " 474.745860 | \n",
+ " 0.193536 | \n",
+ " 0.140727 | \n",
+ " 0.307988 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SCEG | \n",
+ " 199.413853 | \n",
+ " 0.070920 | \n",
+ " 0.055193 | \n",
+ " 0.881281 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SCL | \n",
+ " 67.912004 | \n",
+ " 0.062952 | \n",
+ " 0.047972 | \n",
+ " 0.876773 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SEC | \n",
+ " 101.168513 | \n",
+ " 0.750749 | \n",
+ " 0.499948 | \n",
+ " -4.591980 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SOCO | \n",
+ " 1533.652153 | \n",
+ " 0.056336 | \n",
+ " 0.042837 | \n",
+ " 0.914973 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SPA | \n",
+ " 8.902786 | \n",
+ " 0.119174 | \n",
+ " 0.097712 | \n",
+ " 0.547590 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SRP | \n",
+ " 210.693660 | \n",
+ " 0.060925 | \n",
+ " 0.043372 | \n",
+ " 0.962568 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " SWPP | \n",
+ " 1645.852724 | \n",
+ " 0.053497 | \n",
+ " 0.039910 | \n",
+ " 0.887349 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TAL | \n",
+ " 27.646791 | \n",
+ " 0.084667 | \n",
+ " 0.061949 | \n",
+ " 0.873906 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TEC | \n",
+ " 179.952280 | \n",
+ " 0.073150 | \n",
+ " 0.056128 | \n",
+ " 0.916242 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TEPC | \n",
+ " 371.806851 | \n",
+ " 0.258675 | \n",
+ " 0.196325 | \n",
+ " 0.191082 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TIDC | \n",
+ " 19.951905 | \n",
+ " 0.065014 | \n",
+ " 0.047388 | \n",
+ " 0.926043 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TPWR | \n",
+ " 35.683876 | \n",
+ " 0.064482 | \n",
+ " 0.045159 | \n",
+ " 0.887332 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " TVA | \n",
+ " 1047.724282 | \n",
+ " 0.057605 | \n",
+ " 0.043287 | \n",
+ " 0.910637 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " WACM | \n",
+ " 520.094134 | \n",
+ " 0.201556 | \n",
+ " 0.147421 | \n",
+ " -1.484909 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " WALC | \n",
+ " 160.437651 | \n",
+ " 0.147696 | \n",
+ " 0.126292 | \n",
+ " 0.274431 | \n",
+ "
\n",
+ " \n",
+ " 0 | \n",
+ " WAUW | \n",
+ " 9.114774 | \n",
+ " 0.096861 | \n",
+ " 0.074179 | \n",
+ " 0.712419 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " BA RMS_ABS RMS_NORM MAPE R2\n",
+ "0 AEC 43.182333 0.080527 0.060206 0.895177\n",
+ "0 AECI 349.811003 0.130529 0.133922 0.652215\n",
+ "0 AVA 162.161834 0.110138 0.108262 0.681471\n",
+ "0 AZPS 226.108856 0.066803 0.052413 0.948951\n",
+ "0 BANC 119.132254 0.062582 0.045901 0.918882\n",
+ "0 BPAT 289.193302 0.045355 0.034236 0.902667\n",
+ "0 CHPD 28.805568 0.136962 0.084884 0.758776\n",
+ "0 CISO 2144.139480 0.087673 0.064678 0.775649\n",
+ "0 CPLE 511.961396 0.072310 0.055798 0.894201\n",
+ "0 DOPD 29.164507 0.141334 0.142977 0.475714\n",
+ "0 DUK 759.594469 0.062310 0.047458 0.900849\n",
+ "0 EPE 68.553399 0.070344 0.047648 0.930497\n",
+ "0 ERCO 2530.005379 0.057767 0.049336 0.927076\n",
+ "0 FMPP 138.996269 0.068148 0.047540 0.926753\n",
+ "0 FPC 426.639079 0.068521 0.051097 0.940649\n",
+ "0 FPL 862.808712 0.058933 0.045441 0.943773\n",
+ "0 GCPD 55.523529 0.090685 0.085862 0.187028\n",
+ "0 GVL 20.126424 0.083891 0.059205 0.903672\n",
+ "0 HST 5.304804 0.079155 0.059759 0.902074\n",
+ "0 IID 33.647281 0.080587 0.067286 0.960689\n",
+ "0 IPCO 145.893033 0.075232 0.055792 0.870300\n",
+ "0 ISNE 864.147603 0.063998 0.048545 0.867955\n",
+ "0 JEA 113.481811 0.075732 0.056203 0.900466\n",
+ "0 LDWP 254.061359 0.083065 0.061816 0.853437\n",
+ "0 LGEE 319.325996 0.080571 0.061461 0.843170\n",
+ "0 MISO 3947.497097 0.053240 0.039292 0.878299\n",
+ "0 NEVP 181.396846 0.042907 0.031114 0.965330\n",
+ "0 NSB 4.724918 0.093143 0.069128 0.901988\n",
+ "0 NWMT 159.519694 0.110718 0.116043 0.046473\n",
+ "0 NYIS 929.351301 0.052242 0.038412 0.916865\n",
+ "0 PACE 289.948624 0.052148 0.040129 0.820101\n",
+ "0 PACW 148.109105 0.062658 0.048284 0.853165\n",
+ "0 PGE 125.559460 0.053611 0.040305 0.895974\n",
+ "0 PJM 4384.157491 0.047989 0.036301 0.923456\n",
+ "0 PNM 74.255328 0.046536 0.035851 0.910086\n",
+ "0 PSCO 573.318593 0.104979 0.093124 0.500939\n",
+ "0 PSEI 193.784724 0.057435 0.044362 0.896085\n",
+ "0 SC 474.745860 0.193536 0.140727 0.307988\n",
+ "0 SCEG 199.413853 0.070920 0.055193 0.881281\n",
+ "0 SCL 67.912004 0.062952 0.047972 0.876773\n",
+ "0 SEC 101.168513 0.750749 0.499948 -4.591980\n",
+ "0 SOCO 1533.652153 0.056336 0.042837 0.914973\n",
+ "0 SPA 8.902786 0.119174 0.097712 0.547590\n",
+ "0 SRP 210.693660 0.060925 0.043372 0.962568\n",
+ "0 SWPP 1645.852724 0.053497 0.039910 0.887349\n",
+ "0 TAL 27.646791 0.084667 0.061949 0.873906\n",
+ "0 TEC 179.952280 0.073150 0.056128 0.916242\n",
+ "0 TEPC 371.806851 0.258675 0.196325 0.191082\n",
+ "0 TIDC 19.951905 0.065014 0.047388 0.926043\n",
+ "0 TPWR 35.683876 0.064482 0.045159 0.887332\n",
+ "0 TVA 1047.724282 0.057605 0.043287 0.910637\n",
+ "0 WACM 520.094134 0.201556 0.147421 -1.484909\n",
+ "0 WALC 160.437651 0.147696 0.126292 0.274431\n",
+ "0 WAUW 9.114774 0.096861 0.074179 0.712419"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"# Generate a list of BA abbreviations to process:\n",
"ba_abbrev_list = tell.get_balancing_authority_to_model_dict().keys()\n",
@@ -200,12 +907,28 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
"id": "24b4161e-0533-40de-88bd-b5b81ec9de7e",
"metadata": {
"tags": []
},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "FileNotFoundError",
+ "evalue": "[Errno 2] No such file or directory: '/Users/burl878/Documents/Code/code_repos/tell/tell/tell/data/models/ERCO_multi-layer-perceptron-regressor_scikit-learn-version-1.2.2.joblib'",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[6], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Run the MLP prediction step for a single BA (i.e., \"region\"):\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m pdf \u001b[38;5;241m=\u001b[39m tell\u001b[38;5;241m.\u001b[39mpredict(region \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mERCO\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 3\u001b[0m year \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m2039\u001b[39m,\n\u001b[1;32m 4\u001b[0m data_dir \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(tell_data_dir, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msample_forcing_data\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfuture_weather\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrcp85hotter_ssp5\u001b[39m\u001b[38;5;124m'\u001b[39m),\n\u001b[1;32m 5\u001b[0m datetime_field_name \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mTime_UTC\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 6\u001b[0m save_prediction \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m 7\u001b[0m prediction_output_directory \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(tell_data_dir, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtell_quickstarter_data\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124moutputs\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmlp_output\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124mr\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrcp85hotter_ssp5\u001b[39m\u001b[38;5;124m'\u001b[39m))\n\u001b[1;32m 9\u001b[0m \u001b[38;5;66;03m# View the prediction dataframe:\u001b[39;00m\n\u001b[1;32m 10\u001b[0m pdf\n",
+ "File \u001b[0;32m~/Documents/Code/code_repos/tell/tell/tell/mlp_predict.py:91\u001b[0m, in \u001b[0;36mpredict\u001b[0;34m(region, year, data_dir, datetime_field_name, save_prediction, prediction_output_directory, **kwargs)\u001b[0m\n\u001b[1;32m 84\u001b[0m data_mlp \u001b[38;5;241m=\u001b[39m DatasetPredict(region\u001b[38;5;241m=\u001b[39mregion,\n\u001b[1;32m 85\u001b[0m year\u001b[38;5;241m=\u001b[39myear,\n\u001b[1;32m 86\u001b[0m data_dir\u001b[38;5;241m=\u001b[39mdata_dir,\n\u001b[1;32m 87\u001b[0m datetime_field_name\u001b[38;5;241m=\u001b[39mdatetime_field_name,\n\u001b[1;32m 88\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 90\u001b[0m \u001b[38;5;66;03m# load models and the normalization dictionary from file\u001b[39;00m\n\u001b[0;32m---> 91\u001b[0m mlp_model, normalized_dict \u001b[38;5;241m=\u001b[39m load_predictive_models(region\u001b[38;5;241m=\u001b[39mregion,\n\u001b[1;32m 92\u001b[0m model_output_directory\u001b[38;5;241m=\u001b[39msettings\u001b[38;5;241m.\u001b[39mmodel_output_directory)\n\u001b[1;32m 94\u001b[0m \u001b[38;5;66;03m# normalize model features and targets for the MLP model\u001b[39;00m\n\u001b[1;32m 95\u001b[0m x_mlp_norm \u001b[38;5;241m=\u001b[39m normalize_prediction_data(data_arr\u001b[38;5;241m=\u001b[39mdata_mlp\u001b[38;5;241m.\u001b[39mx_data,\n\u001b[1;32m 96\u001b[0m min_train_arr\u001b[38;5;241m=\u001b[39mnormalized_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmin_x_train\u001b[39m\u001b[38;5;124m\"\u001b[39m],\n\u001b[1;32m 97\u001b[0m max_train_arr\u001b[38;5;241m=\u001b[39mnormalized_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmax_x_train\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n",
+ "File \u001b[0;32m~/Documents/Code/code_repos/tell/tell/tell/mlp_utils.py:224\u001b[0m, in \u001b[0;36mload_predictive_models\u001b[0;34m(region, model_output_directory)\u001b[0m\n\u001b[1;32m 221\u001b[0m mlp_model_path \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(model_output_directory, mlp_model_file)\n\u001b[1;32m 223\u001b[0m \u001b[38;5;66;03m# load the mlp model\u001b[39;00m\n\u001b[0;32m--> 224\u001b[0m mlp_model \u001b[38;5;241m=\u001b[39m load_model(model_file\u001b[38;5;241m=\u001b[39mmlp_model_path)\n\u001b[1;32m 226\u001b[0m \u001b[38;5;66;03m# load the normalization dictionary\u001b[39;00m\n\u001b[1;32m 227\u001b[0m normalized_dict_file \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(model_output_directory, \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mregion\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m_normalization_dict.joblib\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
+ "File \u001b[0;32m~/Documents/Code/code_repos/tell/tell/tell/mlp_utils.py:185\u001b[0m, in \u001b[0;36mload_model\u001b[0;34m(model_file)\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mAssertionError\u001b[39;00m(msg)\n\u001b[1;32m 184\u001b[0m \u001b[38;5;66;03m# load model from\u001b[39;00m\n\u001b[0;32m--> 185\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m joblib\u001b[38;5;241m.\u001b[39mload(model_file)\n",
+ "File \u001b[0;32m~/anaconda3/lib/python3.11/site-packages/joblib/numpy_pickle.py:650\u001b[0m, in \u001b[0;36mload\u001b[0;34m(filename, mmap_mode)\u001b[0m\n\u001b[1;32m 648\u001b[0m obj \u001b[38;5;241m=\u001b[39m _unpickle(fobj)\n\u001b[1;32m 649\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 650\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(filename, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrb\u001b[39m\u001b[38;5;124m'\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 651\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m _read_fileobject(f, filename, mmap_mode) \u001b[38;5;28;01mas\u001b[39;00m fobj:\n\u001b[1;32m 652\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(fobj, \u001b[38;5;28mstr\u001b[39m):\n\u001b[1;32m 653\u001b[0m \u001b[38;5;66;03m# if the returned file object is a string, this means we\u001b[39;00m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;66;03m# try to load a pickle file generated with an version of\u001b[39;00m\n\u001b[1;32m 655\u001b[0m \u001b[38;5;66;03m# Joblib so we load it with joblib compatibility function.\u001b[39;00m\n",
+ "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/Users/burl878/Documents/Code/code_repos/tell/tell/tell/data/models/ERCO_multi-layer-perceptron-regressor_scikit-learn-version-1.2.2.joblib'"
+ ]
+ }
+ ],
"source": [
"# Run the MLP prediction step for a single BA (i.e., \"region\"):\n",
"pdf = tell.predict(region = 'ERCO',\n",
@@ -415,9 +1138,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "py3.9.15_std",
+ "display_name": "py.3.9.15_tell",
"language": "python",
- "name": "py3.9.15_std"
+ "name": "py.3.9.15_tell"
},
"language_info": {
"codemirror_mode": {
@@ -429,7 +1152,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.15"
+ "version": "3.11.3"
}
},
"nbformat": 4,
diff --git a/requirements.txt b/requirements.txt
index 11abf2f..d2867b3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -31,7 +31,6 @@ pytz>=2021.3
PyYAML>=6.0
requests>=2.27.1
Rtree>=0.9.7
-scikit-learn==1.0.2
scipy>=1.7.3
Shapely>=1.8.0
six>=1.16.0
diff --git a/setup.py b/setup.py
index 31e9732..0e12d4f 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@ def readme():
description='A model to predict total electricity loads',
long_description=readme(),
long_description_content_type="text/markdown",
- python_requires='>=3.9.*, <4',
+ python_requires='>=3.9',
include_package_data=True,
install_requires=[
'setuptools>=49.2.1',
@@ -58,11 +58,9 @@ def readme():
'PyYAML>=6.0',
'requests>=2.27.1',
'Rtree>=0.9.7',
- 'scikit-learn==1.0.2',
'scipy>=1.7.3',
'Shapely>=1.8.0',
'six>=1.16.0',
- 'sklearn>=0.0',
'threadpoolctl>=3.1.0',
'urllib3>=1.26.8',
'fastparquet>=0.8.3'
diff --git a/tell/data_process_population.py b/tell/data_process_population.py
index a9a911d..4a56f6c 100644
--- a/tell/data_process_population.py
+++ b/tell/data_process_population.py
@@ -269,7 +269,8 @@ def extract_future_ba_population(year: int, ba_code: str, scenario: str, data_in
mapping_df = mapping_df.merge(pop_df, on=['County_FIPS'])
# Only keep the columns that are needed:
- df = mapping_df[['2020', '2030', '2040', '2050', '2060', '2070', '2080', '2090', '2100']].copy()
+ # df = mapping_df[['2020', '2030', '2040', '2050', '2060', '2070', '2080', '2090', '2100']].copy()
+ df = mapping_df.drop(columns=['County_FIPS', 'BA_Code', 'state_name'])
# Sum the population across all counties:
df_sum = df.sum(axis=0)
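The column selection above switches from a hard-coded list of decadal year columns to dropping the identifier columns, so whatever year columns are present in the new population files are kept. A small sketch of the difference, using a made-up mapping_df purely for illustration:

    import pandas as pd

    # Made-up example of the merged county-to-BA mapping table:
    mapping_df = pd.DataFrame({
        'County_FIPS': [53005, 53021],
        'BA_Code': ['BPAT', 'BPAT'],
        'state_name': ['Washington', 'Washington'],
        '2020': [200000, 100000],
        '2025': [210000, 105000],  # a year column the old hard-coded list would not have kept
        '2030': [220000, 110000],
    })

    # Old approach: keep only the explicitly listed decadal columns.
    # df = mapping_df[['2020', '2030', '2040', '2050', '2060', '2070', '2080', '2090', '2100']].copy()

    # New approach: drop the identifier columns and keep all remaining (year) columns.
    df = mapping_df.drop(columns=['County_FIPS', 'BA_Code', 'state_name'])

    # Sum the population across all counties for each year:
    df_sum = df.sum(axis=0)
    print(df_sum)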
diff --git a/tell/execute_forward.py b/tell/execute_forward.py
index dfbbf8b..61c8608 100644
--- a/tell/execute_forward.py
+++ b/tell/execute_forward.py
@@ -9,67 +9,6 @@
from scipy import interpolate
from .states_fips_function import state_metadata_from_state_abbreviation
-# def extract_gcam_usa_loads(scenario_to_process: str, filename: str) -> DataFrame:
-# """Extracts the state-level annual loads from a GCAM-USA output file.
-#
-# :param scenario_to_process: Scenario to process
-# :type scenario_to_process: str
-#
-# :param filename: Name of the GCAM-USA output file
-# :type filename: str
-#
-# :return: DataFrame of state-level annual total electricity loads
-#
-# """
-#
-# # Load in the raw GCAM-USA output file:
-# gcam_usa_df = pd.read_csv(filename, index_col=None, header=0)
-#
-# # Cluge the scenario for historical runs:
-# if scenario_to_process == 'historic':
-# scenario_to_process_gcam = 'rcp45cooler_ssp3'
-# else:
-# scenario_to_process_gcam = scenario_to_process
-#
-# # Subset the data to only the scenario you want to process:
-# gcam_usa_df = gcam_usa_df[gcam_usa_df['scenario'].isin([scenario_to_process_gcam])]
-#
-# # Subset the data to only the total annual consumption of electricity by state:
-# gcam_usa_df = gcam_usa_df[gcam_usa_df['param'].isin(['elecFinalBySecTWh'])]
-#
-# # Make a list of all of the states in the "gcam_usa_df":
-# states = gcam_usa_df['subRegion'].unique()
-#
-# # Loop over the states and interpolate their loads to an annual time step:
-# for i in range(len(states)):
-#
-# # Subset to just the data for the state being processed:
-# subset_df = gcam_usa_df[gcam_usa_df['subRegion'].isin([states[i]])].copy()
-#
-# # Retrieve the state metadata:
-# (state_fips, state_name) = state_metadata_from_state_abbreviation(states[i])
-#
-# # Linearly interpolate the 5-year loads from GCAM-USA to an annual time step:
-# annual_time_vector = pd.Series(range(subset_df['x'].min(), subset_df['x'].max()))
-# interpolation_function = interpolate.interp1d(subset_df['x'], subset_df['value'], kind='linear')
-# annual_loads = interpolation_function(annual_time_vector)
-#
-# # Create an empty dataframe and store the results:
-# state_df = pd.DataFrame()
-# state_df['Year'] = annual_time_vector.tolist()
-# state_df['GCAM_USA_State_Annual_Load_TWh'] = annual_loads
-# state_df['State_FIPS'] = state_fips
-# state_df['State_Name'] = state_name
-# state_df['State_Abbreviation'] = states[i]
-#
-# # Aggregate the output into a new dataframe:
-# if i == 0:
-# gcam_usa_output_df = state_df
-# else:
-# gcam_usa_output_df = pd.concat([gcam_usa_output_df, state_df])
-#
-# return gcam_usa_output_df
-
def extract_gcam_usa_loads(scenario_to_process: str, gcam_usa_input_dir:str) -> DataFrame:
"""Extracts the state-level annual loads from a GCAM-USA output file.
@@ -101,7 +40,6 @@ def extract_gcam_usa_loads(scenario_to_process: str, gcam_usa_input_dir:str) ->
# Loop over the states and interpolate their loads to an annual time step:
for i in range(len(states)):
- # for i in range(1):
# Subset to just the data for the state being processed:
subset_df = gcam_usa_df[gcam_usa_df['subRegion'].isin([states[i]])].copy()
@@ -588,7 +526,7 @@ def execute_forward(year_to_process: str, gcam_target_year: str, scenario_to_pro
# Print the start time and set a time variable to benchmark the run time:
begin_time = datetime.datetime.now()
- print('Start time = ', begin_time)
+ print('Scenario = ', scenario_to_process, ', Year = ', year_to_process)
# Set the data output directory:
data_output_dir_full = os.path.join(data_output_dir, scenario_to_process, gcam_target_year)
@@ -660,7 +598,6 @@ def execute_forward(year_to_process: str, gcam_target_year: str, scenario_to_pro
output_tell_county_data(joint_mlp_df, year_to_process, gcam_target_year, data_output_dir_full)
# Output the end time and elapsed time in order to benchmark the run time:
- print('End time = ', datetime.datetime.now())
print('Elapsed time = ', datetime.datetime.now() - begin_time)
return summary_df, ba_time_series_df, state_time_series_df
diff --git a/tell/install_forcing_data.py b/tell/install_forcing_data.py
index 791f789..fc2eca4 100644
--- a/tell/install_forcing_data.py
+++ b/tell/install_forcing_data.py
@@ -26,9 +26,11 @@ class InstallForcingSample:
'0.1.4': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
'0.1.5': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
'1.0.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '1.1.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1'}
+ '1.1.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
+ '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
+ '1.3.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'}
- DEFAULT_VERSION = 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1'
+ DEFAULT_VERSION = 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'
def __init__(self, data_dir=None):
self.data_dir = data_dir
diff --git a/tell/install_quickstarter_data.py b/tell/install_quickstarter_data.py
index 719929f..c515c25 100644
--- a/tell/install_quickstarter_data.py
+++ b/tell/install_quickstarter_data.py
@@ -25,9 +25,11 @@ class InstallQuickstarterData:
'0.1.4': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
'0.1.5': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
'1.0.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '1.1.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1'}
+ '1.1.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
+ '1.2.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
+ '1.3.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'}
- DEFAULT_VERSION = 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1'
+ DEFAULT_VERSION = 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'
def __init__(self, data_dir=None):
From 8e6b0d084377d32b8111c60a1f3c40a47fdf6699 Mon Sep 17 00:00:00 2001
From: "Casey D. Burleyson" <31452682+cdburley@users.noreply.github.com>
Date: Mon, 19 Aug 2024 16:01:30 -0700
Subject: [PATCH 3/5] Updated download paths
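Both install modules resolve the download URL from a version-keyed dictionary and fall back to a default URL when the installed tell version has no entry, so trimming the '1.3.0' rows does not break installs from unlisted versions. A condensed sketch of that lookup pattern, with the dictionary abbreviated to two entries (URLs copied from the forcing-data module below):

    from pkg_resources import get_distribution

    DATA_VERSION_URLS = {
        '1.1.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
        '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
    }
    DEFAULT_VERSION = 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'

    # Look up the URL for the installed tell version, falling back to the default:
    current_version = get_distribution('tell').version
    try:
        data_link = DATA_VERSION_URLS[current_version]
    except KeyError:
        print(f"Link to data missing for current version: {current_version}. "
              f"Using default version: {DEFAULT_VERSION}")
        data_link = DEFAULT_VERSION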
---
tell/install_forcing_data.py | 3 +--
tell/install_quickstarter_data.py | 3 +--
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/tell/install_forcing_data.py b/tell/install_forcing_data.py
index fc2eca4..6e2edf9 100644
--- a/tell/install_forcing_data.py
+++ b/tell/install_forcing_data.py
@@ -27,8 +27,7 @@ class InstallForcingSample:
'0.1.5': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
'1.0.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
'1.1.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
- '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
- '1.3.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'}
+ '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'}
DEFAULT_VERSION = 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'
diff --git a/tell/install_quickstarter_data.py b/tell/install_quickstarter_data.py
index c515c25..2ace607 100644
--- a/tell/install_quickstarter_data.py
+++ b/tell/install_quickstarter_data.py
@@ -26,8 +26,7 @@ class InstallQuickstarterData:
'0.1.5': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
'1.0.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
'1.1.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
- '1.2.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
- '1.3.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'}
+ '1.2.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'}
DEFAULT_VERSION = 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'
From 1e7df603317990db133dce30a80a54d0b904b04a Mon Sep 17 00:00:00 2001
From: "Casey D. Burleyson" <31452682+cdburley@users.noreply.github.com>
Date: Mon, 19 Aug 2024 16:29:24 -0700
Subject: [PATCH 4/5] Trying workaround
---
tell/install_forcing_data.py | 79 ------------------------------
tell/install_quickstarter_data.py | 81 -------------------------------
2 files changed, 160 deletions(-)
delete mode 100644 tell/install_forcing_data.py
delete mode 100644 tell/install_quickstarter_data.py
diff --git a/tell/install_forcing_data.py b/tell/install_forcing_data.py
deleted file mode 100644
index 6e2edf9..0000000
--- a/tell/install_forcing_data.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import shutil
-import zipfile
-import requests
-
-from io import BytesIO as BytesIO
-from pkg_resources import get_distribution
-
-
-class InstallForcingSample:
- """Download the TELL sample forcing data package from Zenodo that matches the current installed tell distribution
-
- :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
- to install it in data directory of the package.
-
- :type data_dir: str
-
- """
-
- # URL for DOI minted example data hosted on Zenodo
- DATA_VERSION_URLS = {'0.0.1': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.1': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.2': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.3': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.4': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '0.1.5': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '1.0.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
- '1.1.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
- '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'}
-
- DEFAULT_VERSION = 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'
-
- def __init__(self, data_dir=None):
- self.data_dir = data_dir
-
- def fetch_zenodo(self):
- """Download the tell sample forcing data package from Zenodo that matches the current tell distribution"""
-
- # Get the current version of TELL that is installed:
- current_version = get_distribution('tell').version
-
- try:
- data_link = InstallForcingSample.DATA_VERSION_URLS[current_version]
-
- except KeyError:
- msg = f"Link to data missing for current version: {current_version}. Using default version: {InstallForcingSample.DEFAULT_VERSION}"
-
- data_link = InstallForcingSample.DEFAULT_VERSION
-
- print(msg)
-
- # Retrieve content from the URL:
- print(f"Downloading the sample forcing data package for tell version {current_version}...")
- r = requests.get(data_link)
-
- # Extract the data from the .zip file:
- with zipfile.ZipFile(BytesIO(r.content)) as zipped:
- zipped.extractall(self.data_dir)
-
- # Remove the empty "__MACOSX" directory:
- shutil.rmtree(os.path.join(self.data_dir, r'__MACOSX'))
-
- # Report that the download is complete:
- print(f"Done!")
-
-
-def install_sample_forcing_data(data_dir=None):
- """Download the tell sample forcing data package from Zenodo
-
- :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
- to install it in data directory of the package.
-
- :type data_dir: str
- """
-
- zen = InstallForcingSample(data_dir=data_dir)
-
- zen.fetch_zenodo()
diff --git a/tell/install_quickstarter_data.py b/tell/install_quickstarter_data.py
deleted file mode 100644
index 2ace607..0000000
--- a/tell/install_quickstarter_data.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import os
-import shutil
-import zipfile
-import requests
-
-from io import BytesIO as BytesIO
-from pkg_resources import get_distribution
-
-
-class InstallQuickstarterData:
- """Download the TELL sample output data package from Zenodo that matches the current installed tell distribution
-
- :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
- to install it in data directory of the package.
- :type data_dir: str
-
- """
-
- # URL for DOI minted example data hosted on Zenodo
- DATA_VERSION_URLS = {'0.0.1': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.1': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.2': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.3': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.4': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '0.1.5': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '1.0.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
- '1.1.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
- '1.2.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'}
-
- DEFAULT_VERSION = 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'
-
- def __init__(self, data_dir=None):
-
- self.data_dir = data_dir
-
- def fetch_zenodo(self):
- """Download the TELL quickstarter data package from Zenodo that matches the current installed tell distribution"""
-
- # Get the current version of TELL that is installed:
- current_version = get_distribution('tell').version
-
- # Try to download the data:
- try:
- data_link = InstallQuickstarterData.DATA_VERSION_URLS[current_version]
-
- except KeyError:
- msg = f"Link to data missing for current version: {current_version}. Using defaultl version: {InstallQuickstarterData.DEFAULT_VERSION}"
-
- data_link = InstallQuickstarterData.DEFAULT_VERSION
-
- print(msg)
-
- # Retrieve content from the URL:
- print(f"Downloading the quickstarter data package for tell version {current_version}...")
- r = requests.get(data_link)
-
- # Extract the data from the .zip file:
- with zipfile.ZipFile(BytesIO(r.content)) as zipped:
- zipped.extractall(self.data_dir)
-
- # Remove the empty "__MACOSX" directory:
- shutil.rmtree(os.path.join(self.data_dir, r'__MACOSX'))
-
- # Report that the download is complete:
- print(f"Done!")
-
-
-def install_quickstarter_data(data_dir=None):
- """Download the TELL quickstarter data package from Zenodo
-
- :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
- to install it in data directory of the package.
-
- :type data_dir: str
-
- """
-
- zen = InstallQuickstarterData(data_dir=data_dir)
-
- zen.fetch_zenodo()
From cea646df3a5f47f89ae033a8003e73a7a468246c Mon Sep 17 00:00:00 2001
From: "Casey D. Burleyson" <31452682+cdburley@users.noreply.github.com>
Date: Mon, 19 Aug 2024 16:31:33 -0700
Subject: [PATCH 5/5] Trying workaround
---
tell/install_forcing_data.py | 79 ++++++++++++++++++++++++++++++
tell/install_quickstarter_data.py | 81 +++++++++++++++++++++++++++++++
2 files changed, 160 insertions(+)
create mode 100644 tell/install_forcing_data.py
create mode 100644 tell/install_quickstarter_data.py
diff --git a/tell/install_forcing_data.py b/tell/install_forcing_data.py
new file mode 100644
index 0000000..6e2edf9
--- /dev/null
+++ b/tell/install_forcing_data.py
@@ -0,0 +1,79 @@
+import os
+import shutil
+import zipfile
+import requests
+
+from io import BytesIO as BytesIO
+from pkg_resources import get_distribution
+
+
+class InstallForcingSample:
+ """Download the TELL sample forcing data package from Zenodo that matches the current installed tell distribution
+
+ :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
+ to install it in data directory of the package.
+
+ :type data_dir: str
+
+ """
+
+ # URL for DOI minted example data hosted on Zenodo
+ DATA_VERSION_URLS = {'0.0.1': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.1': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.2': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.3': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.4': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '0.1.5': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '1.0.0': 'https://zenodo.org/record/6354665/files/sample_forcing_data.zip?download=1',
+ '1.1.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1',
+ '1.2.0': 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'}
+
+ DEFAULT_VERSION = 'https://zenodo.org/records/13344803/files/sample_forcing_data.zip?download=1'
+
+ def __init__(self, data_dir=None):
+ self.data_dir = data_dir
+
+ def fetch_zenodo(self):
+ """Download the tell sample forcing data package from Zenodo that matches the current tell distribution"""
+
+ # Get the current version of TELL that is installed:
+ current_version = get_distribution('tell').version
+
+ try:
+ data_link = InstallForcingSample.DATA_VERSION_URLS[current_version]
+
+ except KeyError:
+ msg = f"Link to data missing for current version: {current_version}. Using default version: {InstallForcingSample.DEFAULT_VERSION}"
+
+ data_link = InstallForcingSample.DEFAULT_VERSION
+
+ print(msg)
+
+ # Retrieve content from the URL:
+ print(f"Downloading the sample forcing data package for tell version {current_version}...")
+ r = requests.get(data_link)
+
+ # Extract the data from the .zip file:
+ with zipfile.ZipFile(BytesIO(r.content)) as zipped:
+ zipped.extractall(self.data_dir)
+
+ # Remove the empty "__MACOSX" directory:
+ shutil.rmtree(os.path.join(self.data_dir, r'__MACOSX'))
+
+ # Report that the download is complete:
+ print(f"Done!")
+
+
+def install_sample_forcing_data(data_dir=None):
+ """Download the tell sample forcing data package from Zenodo
+
+ :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
+ to install it in data directory of the package.
+
+ :type data_dir: str
+ """
+
+ zen = InstallForcingSample(data_dir=data_dir)
+
+ zen.fetch_zenodo()
diff --git a/tell/install_quickstarter_data.py b/tell/install_quickstarter_data.py
new file mode 100644
index 0000000..2ace607
--- /dev/null
+++ b/tell/install_quickstarter_data.py
@@ -0,0 +1,81 @@
+import os
+import shutil
+import zipfile
+import requests
+
+from io import BytesIO as BytesIO
+from pkg_resources import get_distribution
+
+
+class InstallQuickstarterData:
+ """Download the TELL sample output data package from Zenodo that matches the current installed tell distribution
+
+ :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
+ to install it in data directory of the package.
+ :type data_dir: str
+
+ """
+
+ # URL for DOI minted example data hosted on Zenodo
+ DATA_VERSION_URLS = {'0.0.1': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.1': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.2': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.3': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.4': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '0.1.5': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '1.0.0': 'https://zenodo.org/record/6804242/files/tell_quickstarter_data.zip?download=1',
+ '1.1.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1',
+ '1.2.0': 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'}
+
+ DEFAULT_VERSION = 'https://zenodo.org/records/13344957/files/tell_quickstarter_data.zip?download=1'
+
+ def __init__(self, data_dir=None):
+
+ self.data_dir = data_dir
+
+ def fetch_zenodo(self):
+ """Download the TELL quickstarter data package from Zenodo that matches the current installed tell distribution"""
+
+ # Get the current version of TELL that is installed:
+ current_version = get_distribution('tell').version
+
+ # Try to download the data:
+ try:
+ data_link = InstallQuickstarterData.DATA_VERSION_URLS[current_version]
+
+ except KeyError:
+ msg = f"Link to data missing for current version: {current_version}. Using defaultl version: {InstallQuickstarterData.DEFAULT_VERSION}"
+
+ data_link = InstallQuickstarterData.DEFAULT_VERSION
+
+ print(msg)
+
+ # Retrieve content from the URL:
+ print(f"Downloading the quickstarter data package for tell version {current_version}...")
+ r = requests.get(data_link)
+
+ # Extract the data from the .zip file:
+ with zipfile.ZipFile(BytesIO(r.content)) as zipped:
+ zipped.extractall(self.data_dir)
+
+ # Remove the empty "__MACOSX" directory:
+ shutil.rmtree(os.path.join(self.data_dir, r'__MACOSX'))
+
+ # Report that the download is complete:
+ print(f"Done!")
+
+
+def install_quickstarter_data(data_dir=None):
+ """Download the TELL quickstarter data package from Zenodo
+
+ :param data_dir: Optional. Full path to the directory you wish to store the data in. Default is
+ to install it in data directory of the package.
+
+ :type data_dir: str
+
+ """
+
+ zen = InstallQuickstarterData(data_dir=data_dir)
+
+ zen.fetch_zenodo()