diff --git a/.github/workflows/test_fredi.yml b/.github/workflows/test_fredi.yml
index 0bfb2bfe..9b470a25 100644
--- a/.github/workflows/test_fredi.yml
+++ b/.github/workflows/test_fredi.yml
@@ -61,7 +61,7 @@ jobs:
Rscript -e '
devtools::install_github(
repo = "https://github.com/USEPA/FrEDI",
- ref = "main",
+ ref = "${{ github.ref_name }}",
subdir = "FrEDI",
dependencies = FALSE,
upgrade = "never",
@@ -94,6 +94,11 @@ jobs:
### Test results
if(do_tests){
+
+ ### Check if path exists and, if not, create it
+ exists0 <- oPath1 |> dir.exists()
+ if(!exists0){oPath1 |> dir.create(recursive = TRUE)}
+
### Run FrEDI
results0 <- run_fredi()
### Run tests
@@ -119,7 +124,8 @@ jobs:
### Check if path exists and, if not, create it
exists0 <- oPath2 |> dir.exists()
- if(!exists0){oPath2 |> dir.create()}
+ if(!exists0){oPath2 |> dir.create(recursive = TRUE)}
+
### Create report figures
reports0 <- create_DoW_results(
diff --git a/.github/workflows/update_base_data.yml b/.github/workflows/update_base_data.yml
new file mode 100644
index 00000000..00762158
--- /dev/null
+++ b/.github/workflows/update_base_data.yml
@@ -0,0 +1,28 @@
+name: Update Base System Data
+
+on:
+ workflow_dispatch:
+ inputs:
+ fredi_data_branches:
+ type: string
+      description: Which data-only branch of FrEDI_Data to pull data from? (e.g., data_only_branch_state_initial)
+
+
+jobs:
+ get_data:
+ runs-on: ubuntu-latest
+ steps:
+      - uses: actions/checkout@v4
+ - name: Commit results
+ run: |
+ git config --global user.email "actions@github.com"
+ git config --global user.name "GitHub Actions"
+ git pull --depth=1 --ff https://github.com/USEPA/FrEDI_Data.git ${{ github.event.inputs.fredi_data_branches }} --allow-unrelated-histories
+ mv data/tmp_sysdata.rda FrEDI/R/sysdata.rda
+ git branch --show-current
+ git add FrEDI/R/sysdata.rda
+ git rm -r data
+        git pull origin ${{ github.ref_name }} --autostash --rebase -X ours
+ git status
+ git commit -am "add new sysdata.rda from ${{ github.event.inputs.fredi_data_branches }}"
+ git push
diff --git a/FrEDI/.github/workflows/greetings.yml b/FrEDI/.github/workflows/greetings.yml
deleted file mode 100644
index 0a231c47..00000000
--- a/FrEDI/.github/workflows/greetings.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: Greetings
-
-on: [pull_request_target, issues]
-
-jobs:
- greeting:
- runs-on: ubuntu-latest
- permissions:
- issues: write
- pull-requests: write
- steps:
- - uses: actions/first-interaction@v1
- with:
- repo-token: ${{ secrets.GITHUB_TOKEN }}
- issue-message: 'Thanks @${{ github.actor }} , for submitting your issue.'
- pr-message: 'Wohooo, thanks @${{ github.actor }} , for submitting a pull request'
diff --git a/FrEDI/DESCRIPTION b/FrEDI/DESCRIPTION
index e0800ff6..cb874b4c 100644
--- a/FrEDI/DESCRIPTION
+++ b/FrEDI/DESCRIPTION
@@ -41,6 +41,6 @@ Depends: R (>= 3.5.0),
Roxygen: list(markdown = TRUE)
RoxygenNote: 7.2.3
LazyData: TRUE
-Suggests: knitr, rmarkdown, prettydoc, devtools
+Suggests: knitr, rmarkdown, prettydoc, devtools, kableExtra
VignetteBuilder: knitr
Language: en-US
diff --git a/FrEDI/R/aggregate_impacts.R b/FrEDI/R/aggregate_impacts.R
index adb0ea2f..1bd66d68 100644
--- a/FrEDI/R/aggregate_impacts.R
+++ b/FrEDI/R/aggregate_impacts.R
@@ -47,559 +47,617 @@
### This function aggregates outputs produced by temperature binning
aggregate_impacts <- function(
data, ### Data frame of outputs from temperature binning
- aggLevels = c("national", "modelaverage", "impactyear", "impacttype"), ### Levels of aggregation
+ aggLevels = c("national", "modelaverage", "impacttype", "impactyear"), ### Levels of aggregation
columns = c("annual_impacts"), ### Columns to aggregate
- groupByCols = c("sector", "variant", "impactYear", "impactType", "model_type", "model", "region"), ### Columns to group by
- mode = "all"
+ groupByCols = c("sector", "variant", "impactType", "impactYear") |>
+ c("region", "state", "postal") |>
+ c("model_type", "model") |>
+ c("includeaggregate", "sectorprimary"),
+ silent = TRUE
){
-
-
###### Defaults ######
### Not used currently; preserving it in messaging logicals for the future
- msgUser <- T
- ### Modes...specifically SLR interpolation only
- xMode <- ifelse(is.null(mode), "all", mode) |> tolower()
-
- ###### Ungroup Data ######
- data <- data |> ungroup() #; names(data) |> print()
-
+ msgUser <- !silent
+ msg0 <- function(lvl0=1){c("\t") |> rep(lvl0) |> paste(collapse = c(""))}
+ yearCol0 <- c("year")
+ # summaryCols <- columns; rm(columns)
+ ####### By State ######
+ byState <- c("state") %in% (data |> names())
+ if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+ popCol0 <- byState |> ifelse("state_pop", "reg_pop")
+ # byState |> print()
+
+ ###### Format Data ######
+ ### Ungroup
+ data <- data |> ungroup() #; names(data) |> print()
+ # data |> glimpse()
###### Years Info ######
### Years in data
# c_npdRefYear <- 2090
- c_dataYears <- data$year |> unique()
- # has_plus2090vals <- (c_dataYears[which(c_dataYears > c_npdRefYear)] |> length()) > 0
-
- ###### SLR Info ######
- # assign("slr_cm", rDataList[["slr_cm"]])
- assign("co_models", rDataList[["co_models"]])
- co_slrs <- co_models |> filter(modelType=="slr")
- co_slrs <- co_slrs |> mutate_at(.vars=c("model_dot", "model_label"), as.character)
- co_slrs <- co_slrs |> as.data.frame()
-
- ###### Load Sector Info ######
- name_dfSectors <- "co_sectors"
- sectorCols_old <- c("sector_id", "sector_label", "modelType")
- sectorCols_new <- c("sector_id", "sector", "model_type")
-
- assign(name_dfSectors, rDataList[[name_dfSectors]])
-
- co_sectors <- co_sectors |> select(all_of(sectorCols_old))
- co_sectors <- co_sectors |> rename_with(~sectorCols_new, .cols=sectorCols_old)
- # co_sectors <- co_sectors |> (function(y){names(y) <- c(sectorCols_new); return(y)})
- co_sectors <- co_sectors |> mutate(model_type = model_type |> toupper())
- rm("sectorCols_old", "sectorCols_new")
-
- ###### Load Variant Info ######
- ### Get input scenario info: co_inputScenarioInfo
- ### Load variant info table from sysdata.rda
- name_dfVariant <- "co_variants"
- assign(name_dfVariant, rDataList[[name_dfVariant]])
- ### Format variants
- variantCols_old <- c("sector_id", "variant_label")
- variantCols_new <- c("sector_id", "variant")
- variantCols_other <- c("sectorprimary", "includeaggregate")
- co_variants <- co_variants |> select(c(all_of(variantCols_old), all_of(variantCols_other)))
- # co_variants <- co_variants |> (function(y){names(y) <- c(variantCols_new, variantCols_other); return(y)})
- co_variants <- co_variants |> rename_with(~variantCols_new, .cols=variantCols_old)
- rm("variantCols_old", "variantCols_new", "variantCols_other")
+ c_dataYears <- data[[yearCol0]] |> unique()
- ### Combine sector and variant info
- co_sector_variants <- co_sectors |> left_join(co_variants, by = "sector_id") |> select(-c("sector_id"))
+ ###### Aggregation Levels ######
+ ### Types of summarization to do: default
+ # aggList0 <- c("national", "modelaverage", "impactyear", "impacttype", "all")
+ aggList0 <- c("national", "modelaverage", "impacttype", "impactyear")
+ null_aggLvls <- aggLevels |> is.null()
+ aggLevels <- aggLevels |> tolower()
+ aggNone <- "none" %in% aggLevels
+ aggAll <- "all" %in% aggLevels
+ if(null_aggLvls | aggAll){
+ aggLevels <- aggList0
+ } else if(aggNone){
+ aggLevels <- c()
+ if(msgUser){
+      ### Message user and return data unchanged
+      msg0 (1) |> paste0("No aggregation levels specified...") |> message()
+      msg0 (1) |> paste0("Returning data...", "\n") |> message()
+ paste0("Finished.", "\n") |> message()
+ } ### End if msgUser
+ return(data)
+ } else{
+ aggLevels <- aggLevels[aggLevels %in% aggList0]
+ } ### End else
+ ### Check if aggregation required
+ requiresAgg <- aggLevels |> length() > 0
+ ###### Aggregation Level Options ######
+ ### Aggregate to impact years or national...reduces the columns in output
+ aggImpYear <- "impactyear" %in% aggLevels
+ aggImpTypes <- "impacttype" %in% aggLevels
+ aggNational <- "national" %in% aggLevels
+ aveModels <- "modelaverage" %in% aggLevels
- ###### Groups Columns ######
- ### Filter to columns in data and mutate those columns to character
- default_groupByCols <- c("sector", "variant", "impactYear", "impactType", "model_type", "model", "region")
+ ### Filter data
+ ### If "national" aggregation, filter out national totals
+ if(aggNational){data <- data |> filter(region!="National Total")}
+ ### If modelAverage %in% aggLevels, filter out model averages
+ if(aveModels ){data <- data |> filter(!(model %in% c("Average", "Model Average")))}
+
+ ###### Get FrEDI Data Objects ######
+ ### Load info tables from sysdata.rda
+ co_models <- "co_models" |> get_frediDataObj("frediData")
+ co_sectors <- "co_sectors" |> get_frediDataObj("frediData")
+ co_variants <- "co_variants" |> get_frediDataObj("frediData")
+
+  ###### - Format SLR Info ######
+ co_slrs <- co_models |> filter(modelType=="slr")
+ co_slrs <- co_slrs |> mutate_at(c("model_dot", "model_label"), as.character)
+
+ ###### - Format Sector Info ######
+ rename0 <- c("sector_id", "sector_label", "modelType")
+ rename1 <- c("sector_id", "sector", "model_type")
+ co_sectors <- co_sectors |> select(all_of(rename0))
+ co_sectors <- co_sectors |> rename_at(c(rename0), ~rename1)
+ co_sectors <- co_sectors |> mutate_at(c("model_type"), toupper)
+ rm("rename0", "rename1")
+
+ ###### - Format Variant Info ######
+ ### Format variants
+ rename0 <- c("sector_id", "variant_label")
+ rename1 <- c("sector_id", "variant")
+ select0 <- c(rename1) #|> c("sectorprimary", "includeaggregate")
+ co_variants <- co_variants |> rename_at(c(rename0), ~rename1)
+ co_variants <- co_variants |> select(all_of(select0))
+ rm("rename0", "rename1", "select0")
+ ### Combine sector and variant info
+ join0 <- "sector_id"
+ co_sectorVars <- co_sectors |> left_join(co_variants, by =join0)
+ co_sectorVars <- co_sectorVars |> select(-all_of(join0))
+ rm(join0, co_variants)
+
+ ###### Grouping Columns ######
### Use default group by columns if none specified...otherwise, check which are present
- if(is.null(groupByCols)){
- groupByCols <- default_groupByCols
- }
+ groupByCols0 <- c("sector", "variant", "impactType", "impactYear")
+ groupByCols0 <- groupByCols0 |> c("region") |> c(stateCols0)
+ groupByCols0 <- groupByCols0 |> c("model_type", "model")
+ groupByCols0 <- groupByCols0 |> c("sectorprimary", "includeaggregate")
+ if(groupByCols |> is.null()){groupByCols <- groupByCols0}
+ rm(groupByCols0)
+ # ### Convert grouping columns to character
+ # data <- data |> mutate_at(c(groupByCols), as.character)
### Check if columns for grouping are there
- names_data0 <- data |> names()
- is_groupByCol <- groupByCols %in% names_data0
- which_notPresentGroups <- (!is_groupByCol) |> which()
+ isPresent0 <- groupByCols %in% (data |> names())
+ hasNaCols0 <- (!isPresent0) |> which() |> length() > 0
### Message user if some columns aren't present
- if(length(which_notPresentGroups) > 0){
- groupByCols <- groupByCols[which(is_groupByCol)]
- if(msgUser){
- message("\t", "Warning: groupByCols = c(", paste(groupByCols[which_notPresentGroups], collapse = ", "), ") not present...")
- message("\t\t", "Grouping by remaining columns...")
- }
- }
-
- ### Add sector primary and include aggregate
- # newGroupCols <- c("sectorprimary", "includeaggregate") |>
- newGroupCols <- c("sectorprimary", "includeaggregate", "physicalmeasure") |>
- (function(y){y[which(!(y %in% groupByCols))]})() |>
- (function(y){y[which(y %in% names_data0)]})()
- ### If length(newGroupCols)>0, add them
- if(length(newGroupCols)>0){
- groupByCols <- groupByCols |> c(newGroupCols)
- }
- ### Remove extra names
- rm("is_groupByCol", "which_notPresentGroups", "newGroupCols")
+ if(hasNaCols0 & msgUser){
+ msg0(1) |> paste0("Warning: groupByCols = c(", paste(groupByCols[!isPresent0], collapse = ", "), ") not present...") |> message()
+ msg0(2) |> paste0("Grouping by remaining columns...") |> message()
+ } ### End
+ ### Adjust to desired values
+ groupByCols <- groupByCols[isPresent0]
+ rm(hasNaCols0, isPresent0)
+ # groupByCols |> print()
+
+ ### Drop some columns from grouping columns
+ if(aggImpTypes){
+ scalarCols <- c("physScalar", "physAdj", "damageAdj", "econScalar", "econAdj", "econMultiplier") |> paste("Name")
+ # scalarCols <- c("physScalar", "physAdj", "damageAdj", "econScalar", "econAdj", "econMultiplier")
+ # scalarCols <- scalarCols0 |> map(~.x |> paste0(c(scalarSuffix0))) |> unlist()
+ dropCols <- c("physicalmeasure") |> c(scalarCols) |> c(popCol0, "national_pop")
+ isDropCol <- groupByCols %in% dropCols
+ hasDropCols <- isDropCol |> any()
+    ### Message first: isDropCol indexes the pre-drop groupByCols
+    if(hasDropCols & msgUser){
+      ### Message user
+      msg0 (1) |> paste0(
+        "Warning: cannot group by columns = c(`",
+        groupByCols[isDropCol] |> paste(collapse="`, `"),
+        "`) when 'impacttype' in aggLevels!") |> message()
+      msg0 (2) |> paste0(
+        "Dropping columns = c(`",
+        groupByCols[isDropCol] |> paste(collapse="`, `"),
+        "`) from grouping columns..."
+      ) |> message()
+    }### End if(msgUser)
+
+    ### If hasDropCols, drop the columns
+    if(hasDropCols){
+      ### Drop levels
+      groupByCols <- groupByCols |> (function(y){y[!(y %in% dropCols)]})()
+    } ### End if(hasDropCols)
+ ### Remove extra names
+ rm(scalarCols, dropCols, isDropCol, hasDropCols)
+ } ### End if(aggImpTypes)
###### Summary Columns ######
### Columns to summarize
# if(!is.null(columns)){summaryCols <- columns}
- default_summaryCols <- c("annual_impacts")
- if(is.null(columns)) {summaryCols <- default_summaryCols}
- else {summaryCols <- columns}
-
- is_sumByCol <- summaryCols %in% names_data0
- which_notPresentSums <- (!is_sumByCol) |> which()
+ summaryCols0 <- c("annual_impacts")
+ nullSumCols <- columns |> is.null()
+ if(nullSumCols) {
+ summaryCols <- summaryCols0
+ } else {
+ summaryCols <- columns
+ } ### End else
+ isPresent0 <- summaryCols %in% (data |> names())
+ hasNaCols0 <- (!isPresent0) |> which() |> length() > 0
### Message user if some columns aren't present
- if(length(which_notPresentSums) > 0){
- summaryCols <- summaryCols[which(is_sumByCol)]
+ if(hasNaCols0 & msgUser){
+ msg0 (1) |> paste0("Warning: columns = c(", paste(summaryCols[!isPresent0], collapse = ", "), ") not present...") |> message()
+    msg0 (2) |> paste0("Aggregating values in columns = c(", paste(summaryCols[isPresent0], collapse = ", "), ")...") |> message()
+ } ### End if no sum columns present
+ ### Drop missing columns
+ summaryCols <- summaryCols[isPresent0]
+ rm(summaryCols0, nullSumCols, isPresent0, hasNaCols0)
+
+ ### Drop some columns from summary columns
+ if(aggImpTypes){
+ scalarCols <- c("physScalar", "physAdj", "damageAdj", "econScalar", "econAdj", "econMultiplier") |> paste("Value")
+ scalarCols <- scalarCols |> c("physScalar", "econScalar", "physEconScalar")
+ scalarCols <- c("c0", "c1", "exp0", "year0") |> c(scalarCols)
+ dropCols <- c("physical_impacts")
+ isDropCol <- summaryCols %in% dropCols
+ hasDropCols <- isDropCol |> any()
+    ### Message first: isDropCol indexes the pre-drop summaryCols
+    if(hasDropCols & msgUser){
+      ### Message user
+      msg0 (1) |> paste0(
+        "Warning: cannot aggregate columns = c(`",
+        summaryCols[isDropCol] |> paste(collapse="`, `"),
+        "`) when 'impacttype' in aggLevels!") |> message()
+      msg0 (2) |> paste0(
+        "Dropping columns = c(`",
+        summaryCols[isDropCol] |> paste(collapse="`, `"),
+        "`) from summary columns...") |> message()
+    } ### End if(msgUser)
+
+    ### If hasDropCols, drop columns
+    if(hasDropCols){
+      ### Drop levels
+      summaryCols <- summaryCols |> (function(y){y[!(y %in% dropCols)]})()
+    } ### End if(hasDropCols)
+
+ ### Remove extra names
+ rm(scalarCols, dropCols, isDropCol, hasDropCols)
+ } ### End if(aggImpTypes)
+
+ ### Drop some columns if certain aggLevels present
+ if(aggImpTypes ){
+ dropCols <- c("scaled_impacts")
+ isDropCol <- summaryCols %in% dropCols
+ hasDropCols <- isDropCol |> any()
+ ### If hasDropCols, message user
+ if(hasDropCols){
+ ### Drop levels
+ summaryCols <- summaryCols |> (function(y){y[!(y %in% dropCols)]})()
+ } ### End if(hasDropCols)
+
+ ###
if(msgUser){
- message("\t", "Warning: columns = c(", paste(summaryCols[which_notPresentSum], collapse = ", "), ") not present...")
- message("\t\t", "Aggregating results for columns c(", paste(summaryCols, collapse=", "),")...")
- }
- }
- rm("is_sumByCol", "which_notPresentSums")
- summaryCol1 <- summaryCols[1]
- ### Add physical impacts summary column
- newSumCols <- c("physical_impacts") |>
- (function(y){y[which(!(y %in% summaryCols))]})() |>
- (function(y){y[which(y %in% names_data0)]})()
- ### If length(newGroupCols)>0, add them
- if(length(newSumCols)>0){
- summaryCols <- summaryCols |> c(newSumCols)
- }
- ### Number of summary columns
- num_summaryCols <- summaryCols |> length()
- data <- data |> mutate_at(.vars=c(all_of(groupByCols)), as.character)
-
- ###### Aggregation Levels ######
- ### Types of summarization to do: default
- aggList0 <- c("national", "modelaverage", "impactyear", "impacttype", "all")
- if(!is.null(aggLevels)){
- aggLevels <- aggLevels |> tolower()
- ### If the user specified none, return the data
- if("none" %in% aggLevels){
- aggLevels <- c()
- if(msgUser){
- message("\t", "No aggregation levels specified...")
- message("\t", "Returning data...")
- message("Finished.")
- }
- return(data)
- }
- ### If the user specified all, use all the aggregation levels
- if("all" %in% aggLevels) aggLevels <- aggList0
- } else{
- aggLevels <- aggList0
- }
-
- if(xMode=="slrinterpolation"){
- aggLevels <- "modelaverage"
- }
-
- requiresAgg <- length(aggLevels) > 0
-
- ###### Aggregation Level Options ######
- ### Aggregate to impact years or national...reduces the columns in output
- impactYearsAgg <- ifelse("impactyear" %in% aggLevels, T, F)
- nationalAgg <- ifelse("national" %in% aggLevels, T, F)
- impactTypesAgg <- ifelse("impacttype" %in% aggLevels, T, F)
- modelAveAgg <- ifelse("modelaverage" %in% aggLevels, T, F)
+ ### Message user
+ msg0 (1) |> paste0(
+ "Warning: cannot aggregate columns = c(`",
+        dropCols |> paste(collapse="`, `"),
+ "`) when aggLevels != 'none'") |> message()
+ msg0 (2) |> paste0(
+ "Dropping columns = c(`",
+        dropCols |> paste(collapse="`, `"),
+ "`) from summary columns...") |> message()
+ } ### End if(msgUser)
+ ### Remove extra names
+ rm(dropCols, isDropCol, hasDropCols)
+ } ### End if(aggImpTypes)
+
+ ### Get single summary column
+ summaryCol1 <- summaryCols[1]
+ ### Number of summary columns
+ num_sumCols <- summaryCols |> length()
+ if(!num_sumCols){
+ msg0 (1) |> paste0("Warning: no columns to aggregate!") |> message()
+ msg0 (2) |> paste0("Exiting...") |> message()
+ return(data)
+ } ### End if(!num_sumCols)
+
+ ###### Format Columns ######
+ ### Make sure all summary values are numeric
+ mutate0 <- c("sectorprimary", "includeaggregate")
+ chrCols0 <- groupByCols |> (function(y){y[!(y %in% c(mutate0))]})()
+ numCols0 <- summaryCols |> c(mutate0)
+ numCols0 <- numCols0 |> c("gdp_usd", "national_pop", "gdp_percap")
+ numCols0 <- numCols0 |> c("driverValue") |> c(popCol0) |> c(yearCol0)
+ numCols0 <- numCols0 |> unique()
+ # data <- data |> mutate_at(c(chrCols0), as.character)
+ data <- data |> mutate_at(c(numCols0), as.character)
+ data <- data |> mutate_at(c(numCols0), as.numeric )
+ rm(mutate0)
###### Standardize Columns ######
### Associated Columns
- baseScenarioCols <- c("year", "gdp_usd", "national_pop", "gdp_percap") |> (function(y){y[which(y %in% names(data))]})()
- regionalPopCols <- c("year", "region", "reg_pop") |> (function(y){y[which(y %in% names(data))]})()
- nationalPopCols <- c("year", "region", "national_pop") |> (function(y){y[which(y %in% names(data))]})()
- driverScenarioCols <- c("year", "model_type", "driverType", "driverUnit", "driverValue") |> (function(y){y[which(y %in% names(data))]})()
-
+ # data |> glimpse()
+ baseCols <- c("year", "gdp_usd", "national_pop", "gdp_percap")
+ regPopCols <- c("year", "region") |> c(stateCols0) |> c(popCol0) |> unique()
+ natPopCols <- c("year", "region") |> c("national_pop")
+ driverCols <- c("year", "model_type", "driverType", "driverUnit", "driverValue")
+
+ ### Get names in names
+ names0 <- data |> names()
+ baseCols <- baseCols |> (function(y){y[(y %in% names0)]})()
+ regPopCols <- regPopCols |> (function(y){y[(y %in% names0)]})()
+ natPopCols <- natPopCols |> (function(y){y[(y %in% names0)]})()
+ driverCols <- driverCols |> (function(y){y[(y %in% names0)]})()
+ rm(names0)
+
+ # ### Add state_pop column
+ # if(aggNational){
+ # groupByCols <- groupByCols |> c(regPopCols) |> unique()
+ # summaryCols <- summaryCols |> (function(y){y[!(y %in% groupByCols)]})()
+ # } ### End if(aggNational)
### List of standardized columns
- standardCols <- c(baseScenarioCols, regionalPopCols, nationalPopCols) |> unique()
- # standardCols <- c(standardCols) |> unique()
- # summaryCols <- c(summaryCols) |> unique()
- ### Standardize columns
- standardCols <- c(groupByCols, standardCols, driverScenarioCols, summaryCols) |> unique()
-
-
- ### If "national" aggregation, filter out national totals
- if(nationalAgg){
- data <- data |> filter(region!="National Total")
- }
- ### If modelAverage %in% aggLevels, filter out model averages
- if(modelAveAgg){
- data <- data |> filter(!(model %in% c("Average", "Model Average")))
- }
- data <- data[,(names(data) %in% standardCols)]#; names(data) |> print
+ standardCols <- c(groupByCols, baseCols, regPopCols, natPopCols) |> unique()
+ standardCols <- standardCols |> c(driverCols, summaryCols) |> unique()
+ scenarioCols <- standardCols |> (function(y){y[!(y %in% c(groupByCols, yearCol0, summaryCols))]})()
+ data <- data |> select(any_of(standardCols))#; names(data) |> print
###### Base Scenario Info ######
### Some values are the same for all runs and regions...separate those values
- sectorsList <- data$sector |> unique()
- sector0 <- sectorsList |> first()
- variant0 <- (data |> filter(sector==sector0))$variant |> unique() |> first()
- region0 <- (data |> filter(sector==sector0))$region |> unique() |> first()
- model0 <- (data |> filter(sector==sector0))$model |> unique() |> first()
- impactType0 <- (data |> filter(sector==sector0))$impactType |> unique() |> first()
- ### Base Scenario and regional population
- baseScenario <- data |> filter(sector == sector0, variant == variant0, region == region0, model == model0, impactType == impactType0)
- regionalPop <- data |> filter(sector == sector0, variant == variant0, model == model0, impactType == impactType0)
-
- ### Filter to impact types
- if(impactTypesAgg){
- impactType0 <- baseScenario$impactType |> unique() |> first()
- baseScenario <- baseScenario |> filter(impactType == impactType0)
- regionalPop <- regionalPop |> filter(impactType == impactType0)
- }
- ### Filter to impact years
- if(impactYearsAgg){
- impactYear0 <- baseScenario$impactYear |> unique() |> first()
- baseScenario <- baseScenario |> filter(impactYear == impactYear0)
- regionalPop <- regionalPop |> filter(impactYear == impactYear0)
- }
- ### Select columns
- ### Base Scenario, regional population, national population
- baseScenario <- baseScenario |> select(all_of(baseScenarioCols))
- regionalPop <- regionalPop |> select(all_of(regionalPopCols))
+ baseScenario <- data |>
+ group_by_at(c(baseCols)) |>
+ summarize(n=n(), .groups="keep") |> ungroup() |>
+ select(-c("n"))
+ ### Regional population
+ regionalPop <- data |>
+ group_by_at(c(regPopCols)) |>
+ summarize(n=n(), .groups="keep") |> ungroup() |>
+ select(-c("n"))
+ # baseCols |> print(); baseScenario |> glimpse()
+ # regPopCols |> print(); regionalPop |> glimpse()
### Create national population scenario from the base scenario
nationalPop <- baseScenario |>
mutate(region = "National Total") |>
- select(all_of(nationalPopCols)) |>
- rename(reg_pop=national_pop)
-
- ###### Driver Scenario ######
- ### Get unique model types, sectors, variants, and models
- names_x <- data |> names()
- modelTypesList <- data$model_type |> unique()
- driverScenario <- modelTypesList |>
- lapply(function(model_type_i){
- ### Filter to sector
- df_i <- data |> filter(model_type==model_type_i)
- sector_i <- df_i$sector |> unique() |> first()
- df_i <- df_i |> filter(sector == sector_i)
- ### Filter to variant
- variant_i <- df_i$variant |> unique() |> first()
- df_i <- df_i |> filter(variant == variant_i)
- ### Filter to impact type
- if("impactType" %in% names_x){
- type_i <- df_i$impactType |> unique() |> first()
- df_i <- df_i |> filter(impactType == type_i)
- }
- ### Filter to region
- if("region" %in% names_x){
- region_i <- df_i$region |> unique() |> first()
- df_i <- df_i |> filter(region == region_i)
- }
- ### Filter to model
- if("model" %in% names_x){
- model_i <- df_i$model |> unique() |> first()
- df_i <- df_i |> filter(model == model_i)
- }
- ### Filter to impact year
- if("impactYear" %in% names_x){
- year_i <- df_i$impactYear |> unique() |> first()
- df_i <- df_i |> filter(impactYear == year_i)
- }
- ### Select columns
- df_i <- df_i |> select(all_of(driverScenarioCols))
- ### Return
- return(df_i)
- }) |> (function(x){do.call(rbind, x)})()
- # driverScenario |> dim() |> print()
+ select(all_of(natPopCols)) |>
+ rename_at(c("national_pop"), ~popCol0)
+ if(byState){nationalPop <- nationalPop |> mutate(state="All", postal="US")}
+ ### Driver Scenario
+ driverScenario <- data |>
+ group_by_at(c(driverCols)) |>
+ summarize(n=n(), .groups="keep") |> ungroup() |>
+ select(-c("n"))
+
+
###### Aggregation ######
- # if(requiresAgg){
- # if(msgUser) message("Aggregating impacts...")
- # }
- ###### Save a copy of the data
- scenarioCols <- c(baseScenarioCols, regionalPopCols, nationalPopCols, driverScenarioCols) |> unique() |>
- (function(y){y[which(!(y %in% c(groupByCols, "year")))]})() #; scenarioCols |> print()
+ # if(msgUser & requiresAgg){message("Aggregating impacts...")}
### Select appropriate columns
- df_aggImpacts <- data |> select(-c(all_of(scenarioCols)))
- # df_aggImpacts |> nrow() |> print(); df_aggImpacts |> head() |> glimpse()
+ df_agg <- data |> select(-all_of(scenarioCols))
+ rm(data)
+ # df_agg |> nrow() |> print(); df_agg |> head() |> glimpse()
- ###### Impact Years ######
+ ###### ** Impact Years ######
### Separate into years after 2090 and before 2090
- if(impactYearsAgg){
- if(msgUser) message("\t", "Interpolating between impact year estimates...")
+ if(aggImpYear){
+ if(msgUser){msg0 (1) |> paste0("Interpolating between impact year estimates...") |> message()}
### Ungroup first
- df_aggImpacts <- df_aggImpacts |> ungroup()
- # summaryCol1 <- summaryCols[1]
+ df_agg <- df_agg |> ungroup()
### Group by columns
- groupCols0 <- groupByCols[which(groupByCols != "impactYear" )]
+ # groupCols0 <- groupByCols |> (function(y){y[!(y %in% c("impactYear", yearCol0))]})()
+ group0 <- groupByCols |> (function(y){y[!(y %in% c("impactYear", yearCol0))]})()
+ group0 <- group0 |> c("year")
### Impact years
impactYears <- c(2010, 2090) |> as.character()
- impactYear1 <- impactYears[1]
- impactYear2 <- impactYears[2]
+ cImpYear1 <- impactYears[1]
+ cImpYear2 <- impactYears[2]
+ nImpYear1 <- cImpYear1 |> as.numeric()
+ nImpYear2 <- cImpYear2 |> as.numeric()
### Separate data into years > 2090, years <= 2090
c_cutoff_yr <- 2090
- df_aggImp_1 <- df_aggImpacts |> filter(year <= c_cutoff_yr)
- df_aggImp_2 <- df_aggImpacts |> filter(year > c_cutoff_yr)
- rm("df_aggImpacts")
+ df_aggImp_1 <- df_agg |> filter(year <= c_cutoff_yr)
+ df_aggImp_2 <- df_agg |> filter(year > c_cutoff_yr)
+ rm(df_agg)
### Then do the post-2090 results
### Exclude 2010 results
- df_aggImpacts <- df_aggImp_2 |> filter(impactYear != impactYear1) |> mutate(impactYear="Interpolation")
- rm("df_aggImp_2")
+ df_agg <- df_aggImp_2 |> filter(impactYear != cImpYear1) |> mutate(impactYear="Interpolation")
+ rm(df_aggImp_2)
### Process pre-2090:
### Separate out observations without impact years
df_naYears <- df_aggImp_1 |> filter(!(impactYear %in% impactYears)) |> mutate(impactYear="Interpolation")
### New upper and lower column names
- new_2090SummaryCols <- paste(summaryCols, "2090", sep="_")
- new_2010SummaryCols <- paste(summaryCols, "2010", sep="_")
+ sumCols2010 <- summaryCols |> paste0("_", "2010")
+ sumCols2090 <- summaryCols |> paste0("_", "2090")
### Filter to impact year in impact years
- df_impYears <- df_aggImp_1 |> filter(impactYear %in% impactYears)
- nrow_impYrs <- df_impYears |> nrow()
- rm("df_aggImp_1")
+ df_impYears <- df_aggImp_1 |> filter(impactYear %in% impactYears)
+ nrow_impYrs <- df_impYears |> nrow()
+ rm(df_aggImp_1)
### For nrow_impYrs > 0
if(nrow_impYrs){
### Filter to other lower models and then bind with the zero values, drop model column
- df2090 <- df_impYears |> filter(impactYear == impactYear2) |> select(-c("impactYear"))
- df2090 <- df2090 |> (function(y){
- y <- y |> as.data.frame()
- y[,new_2090SummaryCols] <- y[,summaryCols]
- return(y)
- })()
-
+ df2010 <- df_impYears |> filter(impactYear == cImpYear1) |> select(-c("impactYear"))
+ df2090 <- df_impYears |> filter(impactYear == cImpYear2) |> select(-c("impactYear"))
+ ### Update summary columns
+ df2010[,sumCols2010] <- df2010[,summaryCols]
+ df2090[,sumCols2090] <- df2090[,summaryCols]
### Drop summary columns from 2010
- df2010 <- df_impYears |> filter(impactYear == impactYear1) |> select(-c("impactYear"))
- df2010 <- df2010 |> (function(y){
- y <- y |> as.data.frame()
- y[,new_2010SummaryCols] <- y[,summaryCols]
- return(y)
- })() |>
- select(-c(all_of(summaryCols)))
+ df2010 <- df2010 |> select(-all_of(summaryCols))
### Join upper and lower data frames and calculate the numerator, denominator, and adjustment factor
- df_impYears <- df2090 |> left_join(df2010, by=c(groupCols0, "year"))
- rm("df2090", "df2010")
+ df_impYears <- df2090 |> left_join(df2010, by=c(group0))
+ # df2090 |> glimpse(); df2010 |> glimpse(); df_impYears |> glimpse()
+ rm(df2090, df2010)
+
### Add Impact year numerator and denominator
- df_impYears <- df_impYears |> mutate(numer_yr = year-as.numeric(impactYear1))
- df_impYears <- df_impYears |> mutate(denom_yr = as.numeric(impactYear2)-as.numeric(impactYear1))
- df_impYears <- df_impYears |> mutate(adj_yr = numer_yr/denom_yr)
+ df_impYears <- df_impYears |> mutate(numer_yr = year - nImpYear1)
+ df_impYears <- df_impYears |> mutate(denom_yr = nImpYear2 - nImpYear1)
+ df_impYears <- df_impYears |> mutate(adj_yr = numer_yr / denom_yr )
### Iterate over summary columns
- for(i in 1:num_summaryCols){
+ for(i in 1:num_sumCols){
### Upper/lower
- col_i <- summaryCols[i]
- col_i_2010 <- col_i |> paste("2010", sep="_")
- col_i_2090 <- col_i |> paste("2090", sep="_")
+ col_i <- summaryCols[i]
+ col_i_2010 <- col_i |> paste0("_", "2010")
+ col_i_2090 <- col_i |> paste0("_", "2090")
### Calculate numerator and denominator
- df_impYears <- df_impYears |> as.data.frame()
-
- df_impYears$new_factor <- df_impYears[,col_i_2090] - df_impYears[,col_i_2010]
- df_impYears$new_value <- df_impYears[,col_i_2010]
+ df_impYears[["new_factor"]] <- df_impYears[[col_i_2090]] - df_impYears[[col_i_2010]]
+ df_impYears[["new_value" ]] <- df_impYears[[col_i_2010]]
# df_slrOther |> names() |> print()
### Update the new value
- oldCol_i <- col_i |> c()
- newCol_i <- "new_value" |> c()
+ oldCol_i <- col_i |> c()
+ newCol_i <- "new_value" |> c()
### Mutate and rename
+ select0 <- c(col_i, "new_factor")
+ select1 <- c(col_i_2010, col_i_2090)
df_impYears <- df_impYears |> mutate(new_value = new_value + new_factor * adj_yr)
- df_impYears <- df_impYears |> select(-c(all_of(col_i), "new_factor"))
- df_impYears <- df_impYears |> rename_with(~oldCol_i[which(newCol_i==.x)], .cols=newCol_i)
- df_impYears <- df_impYears |> select(-c(all_of(col_i_2010), all_of(col_i_2090)))
- rm("i", "col_i", "col_i_2010", "col_i_2090", "oldCol_i", "newCol_i")
- } ### End for(i in 1:num_summaryCols)
-
+ df_impYears <- df_impYears |> select(-all_of(select0))
+ df_impYears <- df_impYears |> rename_at(c(newCol_i), ~oldCol_i)
+ df_impYears <- df_impYears |> select(-all_of(select1))
+ ### Remove values
+ rm(i, col_i, col_i_2010, col_i_2090, oldCol_i, newCol_i, select0)
+ } ### End for(i in 1:num_sumCols)
+ ### Add new factor and drop columns
+ select0 <- c("numer_yr", "denom_yr", "adj_yr")
df_impYears <- df_impYears |> mutate(impactYear="Interpolation")
- df_impYears <- df_impYears |> select(-c("numer_yr", "denom_yr", "adj_yr"))
- }
- rm("impactYears", "impactYear1", "impactYear2", "new_2010SummaryCols", "new_2090SummaryCols")
- rm("groupCols0", "c_cutoff_yr")
+ df_impYears <- df_impYears |> select(-all_of(select0))
+ rm(select0)
+ } ### End if(nrow_impYrs)
+ rm(impactYears, cImpYear1, cImpYear2, sumCols2010, sumCols2090, c_cutoff_yr)
### Add back into values without NA years
### Join post 2090 results with earlier results
- df_aggImp_1 <- df_impYears |> rbind(df_naYears) |> mutate(impactYear="Interpolation")
- df_aggImpacts <- df_aggImpacts |> rbind(df_aggImp_1)
- rm("df_impYears", "df_naYears", "df_aggImp_1")
- }
- # paste0("Finished impact year interpolation: ", nrow(df_aggImpacts)) |> print(); df_aggImpacts |> head() |> glimpse()
-
- ###### Model Averages ######
+ df_aggImp_1 <- df_impYears |> rbind(df_naYears) |> mutate(impactYear="Interpolation")
+ df_agg <- df_agg |> rbind(df_aggImp_1)
+ rm(df_impYears, df_naYears, df_aggImp_1, group0)
+ } ### if(aggImpYear)
+ # paste0("Finished impact year interpolation: ", nrow(df_agg)) |> print(); df_agg |> head() |> glimpse()
+ # "got here1" |> print()
+
+ ###### ** Model Averages ######
+ # groupByCols |> print(); df_agg |> glimpse()
### Average values across models
- if(modelAveAgg){
- modelAveMsg <- ifelse(xMode=="slrinterpolation", "Interpolating SLR impacts..." , "Calculating model averages...")
- if(msgUser) message("\t", modelAveMsg)
+ if(aveModels){
+ modelAveMsg <- "Calculating model averages..."
+ if(msgUser){msg0 (1) |> paste0(modelAveMsg) |> message()}
### Ungroup first
- df_aggImpacts <- df_aggImpacts |> mutate_at(.vars=c("model"), as.character) |> ungroup()
+ df_agg <- df_agg |> ungroup()
+ # df_agg <- df_agg |> mutate_at(c("model"), as.character) |> ungroup()
### Group by columns
- groupCols0 <- groupByCols[which(groupByCols != "model" )]
+ group0 <- groupByCols |> (function(y){y[!(y %in% c("model", yearCol0))]})()
+ group0 <- group0 |> c("year")
+ # group0 |> print()
### Separate model types
- df_gcm <- df_aggImpacts |> filter(tolower(model_type)=="gcm")
- df_slr <- df_aggImpacts |> filter(tolower(model_type)=="slr")
- rm("df_aggImpacts")
- ###### GCM #######
- if(nrow(df_gcm)){
- ### Names of agg impacts
- names_gcms <- df_gcm |> names()
+ df_gcm <- df_agg |> filter(model_type |> tolower() == "gcm")
+ df_agg <- df_agg |> filter(model_type |> tolower() != "gcm")
+ do_gcm <- df_gcm |> nrow() > 0
+ ### Calculate GCM model averages
+ if(do_gcm){
### Calculate number of non missing values
- ### Group data, sum data, calculate averages, and drop NA column
df_modelAves <- df_gcm |> (function(w){
- w$not_isNA <- (!is.na(w[,summaryCol1] |> as.vector()))*1
- return(w)
+ w |> mutate(not_isNA = 1 * (!(w[[summaryCol1]] |> is.na())))
})()
+ ### Group data, sum data, calculate averages, and drop NA column
+ sum0 <- summaryCols |> c("not_isNA")
df_modelAves <- df_modelAves |>
- group_by_at(c(all_of(groupCols0), "year")) |>
- summarize_at(.vars=c(all_of(summaryCols), "not_isNA"), sum, na.rm=T) |> ungroup()
- df_modelAves <- df_modelAves |> mutate(not_isNA = not_isNA |> na_if(0)) |>
- as.data.frame() |> (function(x){
- x[,summaryCols] <- x[,summaryCols] / x$not_isNA
- return(x)
- })()
+ group_by_at(c(group0)) |>
+ summarize_at(c(sum0), sum, na.rm=T) |> ungroup()
+ rm(sum0)
+ ### Adjust for non-missing values
+ df_modelAves <- df_modelAves |> mutate(not_isNA = not_isNA |> na_if(0))
+ # df_modelAves[,summaryCols] <- df_modelAves[,summaryCols] / df_modelAves[["not_isNA"]]
+ df_modelAves <- df_modelAves |> (function(x){
+ x[,summaryCols] <- x[,summaryCols] / x[["not_isNA"]]; return(x)
+ })()
+ ### Drop columns
df_modelAves <- df_modelAves |> select(-c("not_isNA"))
+ ### Mutate models
df_modelAves <- df_modelAves |> mutate(model = "Average")
-
### Add observations back in
- # df_aggImpacts <- df_aggImpacts |> rbind(df_modelAves)
+ # df_agg <- df_agg |> rbind(df_modelAves)
df_gcm <- df_gcm |> rbind(df_modelAves)
- rm("names_gcms", "df_modelAves")
+ rm( df_modelAves)
} ### End if nrow(df_gcm)
### Bind GCM and SLR results
- df_aggImpacts <- df_gcm |> rbind(df_slr)
- rm( "df_gcm", "df_slr", "groupCols0")
+ df_agg <- df_gcm |> rbind(df_agg)
+ rm(df_gcm, group0)
} ### End if "model" %in% aggLevels
- # paste0("Finished model aggregation: ", nrow(df_aggImpacts)) |> print(); df_aggImpacts |> head() |> glimpse()
+ # paste0("Finished model aggregation: ", nrow(df_agg)) |> print();
+ # df_agg |> glimpse()
+ # "got here2" |> print()
- ###### National Totals ######
- if(nationalAgg){
- if(msgUser) message("\t", "Calculating national totals...")
+ ###### ** National Totals ######
+ if(aggNational){
+ if(msgUser){msg0 (1) |> paste0("Calculating national totals...") |> message()}
### Ungroup first
- df_aggImpacts <- df_aggImpacts |> ungroup()
- ### Group by columns
- groupCols0 <- groupByCols[which(groupByCols != "region" )]
- ### Filter to national values and not national values
+ df_agg <- df_agg |> ungroup()
+ ### Grouping columns
+ # group0 <- groupByCols |> (function(y){y[!(y %in% c("region", stateCols0, popCol0, yearCol0))]})()
+ group0 <- groupByCols |> (function(y){y[!(y %in% c("region", stateCols0, popCol0, yearCol0))]})()
+ group0 <- group0 |> c("year")
### Calculate number of non missing values
- df_national <- df_aggImpacts |> (function(w){
- w <- w |> as.data.frame()
- w$not_isNA <- (!is.na(w[,summaryCol1]))*1
- return(w)
+ df_national <- df_agg |> (function(w){
+ w |> mutate(not_isNA = 1 * (!(w[[summaryCol1]] |> is.na())))
})()
### Group data, sum data, calculate averages, and drop NA column
- df_national <- df_national |>
- group_by_at(c(all_of(groupCols0), "year")) |>
- summarize_at(vars(all_of(summaryCols), not_isNA), sum, na.rm=T) |> ungroup()
- df_national <- df_national |> mutate(not_isNA = (not_isNA>=1)*1)
- df_national <- df_national |> mutate(not_isNA = not_isNA |> na_if(0))
- df_national <- df_national |> (function(x){
- x[, summaryCols] <- x[, summaryCols]*x$not_isNA; return(x)
+ # sum0 <- summaryCols |> c(popCol0) |> c("not_isNA")
+ sum0 <- summaryCols |> c("not_isNA")
+ # df_national |> glimpse()
+ df_national <- df_national |>
+ group_by_at(c(group0)) |>
+ summarize_at(c(sum0), sum, na.rm=T) |> ungroup()
+ rm(sum0)
+ ### Adjust non-missing values
+ df_national <- df_national |> mutate(not_isNA = (not_isNA > 0) * 1)
+ df_national <- df_national |> mutate(not_isNA = not_isNA |> na_if(0))
+ df_national <- df_national |> (function(x){
+ x[, summaryCols] <- x[, summaryCols] * x[["not_isNA"]]; return(x)
})()
- df_national <- df_national |> select(-c("not_isNA"))
- df_national <- df_national |> mutate(region="National Total")
+ ### Drop columns, adjust values
+ df_national <- df_national |> select(-c("not_isNA"))
+ df_national <- df_national |> mutate(region="National Total")
+ ### Join with National Pop
+ # join0 <- natPopCols |> (function(y){y[!(y %in% c("national_pop", popCol0))]})()
+ # df_national <- df_national |> left_join(nationalPop, by = c(join0))
+ if(byState){
+ df_national <- df_national |> mutate(state ="All")
+ df_national <- df_national |> mutate(postal="US")
+ } ### End if(byState)
+
### Add back into regional values and bind national population to impact types
- df_aggImpacts <- df_aggImpacts |> rbind(df_national);
- regionalPop <- regionalPop |> rbind(nationalPop)
+ # df_agg |> glimpse(); df_national |> glimpse()
+ df_agg <- df_agg |> rbind(df_national);
+
+ ### Add national to total populations
+ # regionalPop |> glimpse(); nationalPop |> glimpse()
+ regionalPop <- regionalPop |> rbind(nationalPop)
### Remove values
- rm("df_national", "nationalPop", "groupCols0")
+ rm(df_national, group0)
} ### End if national
- # paste0("Finished national totals: ", nrow(df_aggImpacts)) |> print; df_aggImpacts |> head |> glimpse
- # "got here1" |> print
+ # paste0("Finished national totals: ", nrow(df_agg)) |> print; df_agg |> head |> glimpse
+ # "got here3" |> print()
- ###### Impact Types ######
+ ###### ** Impact Types ######
### Summarize by Impact Type
- if(impactTypesAgg){
- if(msgUser) message("\t", "Summing across impact types...")
+ if(aggImpTypes){
+ if(msgUser){msg0 (1) |> paste0("Summing across impact types...") |> message()}
### Ungroup first
- df_aggImpacts <- df_aggImpacts |> ungroup()
- ### Group by columns
- dropCols0 <- c("physicalmeasure", "physical_impacts")
- df_aggImpacts <- df_aggImpacts |> (function(y){
- names_y <- y |> names()
- names_y <- names_y[which(!(names_y %in% dropCols0))]
- y <- y |> select(all_of(names_y))
- return(y)
- })()
- ### Names
- # namesAgg0 <- df_aggImpacts |> names()
- ### Columns
- groupByCols <- groupByCols[which(!(groupByCols %in% c(dropCols0)))]
- summaryCols <- summaryCols[which(!(summaryCols %in% c(dropCols0)))]
- summaryCol1 <- summaryCols[1]
- standardCols <- standardCols[which(!(standardCols %in% c(dropCols0)))]
- ### GroupByCols
- groupCols0 <- groupByCols[which(!(groupByCols %in% c("impactType")))]
- # nGroupCols0 <- groupCols0 |> length()
-
+ df_agg <- df_agg |> ungroup()
+ ### Grouping columns
+ group0 <- groupByCols |> (function(y){y[!(y %in% c("impactType", yearCol0))]})()
+ group0 <- group0 |> c("year")
### Separate into observations that have a single impact type and those with multiple impacts
### Rename impact type for those with one impact
- df_aggImpacts1 <- df_aggImpacts |> filter(impactType=="N/A") |> mutate(impactType="all")
- df_aggImpactsN <- df_aggImpacts |> filter(impactType!="N/A")
- # "aggregate_impacts: got here2" |> print()
- ### Remove df_aggImpacts
- rm("df_aggImpacts")
+ df_imp1 <- df_agg |> filter(impactType!="N/A")
+ df_agg <- df_agg |> filter(impactType=="N/A") |> mutate(impactType="all")
### Summarize at impact types: Count number of impact types
- df_aggImpactsN <- df_aggImpactsN |> (function(w){
- w <- w |> as.data.frame()
- w$not_isNA <- (!is.na(w[,summaryCol1]))*1
- return(w)
- })()
+ df_imp1 <- df_imp1 |> (function(w){
+ w |> mutate(not_isNA = 1 * (!(w[[summaryCol1]] |> is.na())))
+ })()
### Calculate number of observations
- df_aggImpactsN <- df_aggImpactsN |>
- group_by_at(.vars=c(all_of(groupCols0), "year")) |>
- summarize_at(.vars=c(all_of(summaryCols), "not_isNA"), sum, na.rm=T) |>
- as.data.frame() |> ungroup()
-
- # "aggregate_impacts: got here3" |> print()
- df_aggImpactsN <- df_aggImpactsN |> mutate(not_isNA = (not_isNA > 0)*1)
- df_aggImpactsN <- df_aggImpactsN |> mutate(not_isNA = not_isNA |> na_if(0))
- df_aggImpactsN <- df_aggImpactsN |> (function(x){
- x[, summaryCols] <- x[, summaryCols]*x$not_isNA; return(x)
- })()
- df_aggImpactsN <- df_aggImpactsN |> select(-c("not_isNA"))
- df_aggImpactsN <- df_aggImpactsN |> mutate(impactType="all") |> as.data.frame()
- # "aggregate_impacts: got here4" |> print()
- ### Add to impacts
- # df_aggImpacts <- df_oneImpact |> rbind(df_nImpacts) |> mutate(impactType="all")
- # rm("df_oneImpact", "df_nImpacts")
- df_aggImpacts <- df_aggImpacts1 |> rbind(df_aggImpactsN)
- rm("df_aggImpacts1", "df_aggImpactsN", "groupCols0")
+ sum0 <- summaryCols |> c("not_isNA")
+ df_imp1 <- df_imp1 |>
+ group_by_at(c(group0)) |>
+ summarize_at(c(sum0), sum, na.rm=T) |> ungroup()
+ rm(sum0)
+ ### Adjust values & drop column
+ df_imp1 <- df_imp1 |> mutate(not_isNA = (not_isNA > 0) * 1)
+ df_imp1 <- df_imp1 |> mutate(not_isNA = not_isNA |> na_if(0))
+ df_imp1 <- df_imp1 |> (function(x){
+ x[, summaryCols] <- x[, summaryCols] * x[["not_isNA"]]; return(x)
+ })()
+ ### Drop columns and mutate values
+ df_imp1 <- df_imp1 |> select(-c("not_isNA"))
+ df_imp1 <- df_imp1 |> mutate(impactType="all") #|> as.data.frame()
+ ### Bind values
+ df_agg <- df_agg |> rbind(df_imp1)
+ rm(df_imp1)
# "aggregate_impacts: got here5" |> print()
} ### End if impactType in aggLevels
-
- ###### Join Base Scenario Info with Aggregated Data ######
- ### Join national info with population
- # "aggregate_impacts: got here6" |> print()
- # df_base |> head() |> glimpse() |> print()
- regionalPop <- regionalPop |> mutate(year = year |> as.numeric())
- df_base <- baseScenario |> mutate(year = year |> as.numeric())
- df_base <- df_base |> left_join(regionalPop , by=c("year"))
- df_base <- df_base |> left_join(driverScenario, by=c("year"))
- rm("regionalPop", "baseScenario", "driverScenario")
-
- # df_base |> dim() |> print() ### 1470 rows, 13 columns
+ # "got here4" |> print()
+
+ ###### Join Base Scenario Info ######
+ ### Join base scenario with driver scenario
+ ### Join base scenario with population scenario
+ join0 <- c(yearCol0)
+ arrange0 <- c("region") |> c(stateCols0) |> c("model_type") |> c(yearCol0)
+ df_base <- baseScenario |> left_join(driverScenario, by=c(join0), relationship="many-to-many")
+ df_base <- df_base |> left_join(regionalPop , by=c(join0))
+ df_base <- df_base |> arrange_at(c(arrange0))
+ rm(regionalPop, baseScenario, driverScenario)
+ rm(join0, arrange0)
+
+ ### Join base scenario with aggregated info
+ # "got here5" |> print()
### Names
- aggNames <- df_aggImpacts |> names(); #aggNames |> print()
- svNames <- co_sector_variants |> names(); #svNames |> print()
- svJoin <- c("model_type", "sector", "variant")
- svDrop <- svNames[which((svNames %in% aggNames) & !(svNames %in% svJoin))]
- # "aggregate_impacts: got here7" |> print()
-
- df_return <- df_aggImpacts |> left_join(co_sector_variants |> select(-c(all_of(svDrop))), by = c(all_of(svJoin)))
- df_return <- df_return |> left_join(df_base , by = c("year", "region", "model_type"))
- rm("df_aggImpacts", "svDrop", "svJoin", "svNames")
- ###### Reformat sectorprimary and includeaggregate, which were converted to character
- c_aggColumns <- c("sectorprimary", "includeaggregate") |> (function(y){y[which(y %in% names(df_return))]})()
- if(length(c_aggColumns)){
- df_return <- df_return |> mutate_at(.vars=c(all_of(c_aggColumns)), as.numeric)
- }
+ names0 <- df_agg |> names(); #aggNames |> print()
+ names1 <- co_sectorVars |> names()
+ join0 <- co_sectorVars |> names() |> (function(y){y[y %in% names0]})()
+ join1 <- df_base |> names() |> (function(y){y[y %in% c(names0, names1)]})()
+ df_agg <- df_agg |> left_join(co_sectorVars, by=c(join0))
+ df_agg <- df_agg |> left_join(df_base , by=c(join1))
+ rm(names0, names1, join0, join1)
+
+ ###### Format Columns ######
+ # ###### Reformat sectorprimary and includeaggregate, which were converted to character
+ # mutate0 <- c("sectorprimary", "includeaggregate")
+ # mutate0 <- mutate0[mutate0 %in% names(df_agg)]
+ # doMutate <- mutate0 |> length() > 0
+ # if(doMutate){df_agg <- df_agg |> mutate_at(c(mutate0), as.numeric)}
+ ### Convert standard value columns to numeric:
+ mutate0 <- baseCols |> c(popCol0) |> c("driverValue")
+ mutate0 <- mutate0 |> c(summaryCols)
+ mutate0 <- mutate0 |> c("sectorprimary", "includeaggregate")
+ mutate0 <- mutate0 |> unique()
+ mutate0 <- mutate0 |> (function(y){y[y %in% (df_agg |> names())]})()
+ doMutate <- (mutate0 |> length()) > 0
+ if(doMutate){df_agg <- df_agg |> mutate_at(c(mutate0), as.numeric)}
###### Order Columns ######
### Order the data frame and ungroup
### Column indices of columns used in ordering
- return_names <- df_return |> names()
- orderColNames <- c(groupByCols, "year") |> (function(y){y[which(y %in% return_names)]})() #; "got here10" |> print()
- df_return <- df_return |> arrange_at(.vars=c(all_of(orderColNames)))
+ # df_agg |> names() |> print(); groupByCols |> print()
+ arrange0 <- groupByCols |> c(yearCol0) |> unique()
+ df_agg <- df_agg |> arrange_at(c(arrange0))
+ df_agg <- df_agg |> select(any_of(standardCols))
+ df_agg <- df_agg |> ungroup()
###### Return ######
- ### Grouping columns, driver columns, scenario columns
- ### Make sure data is ungrouped and a data frame object
- df_return <- df_return |> select( all_of(standardCols))
- df_return <- df_return |> ungroup() |> as.data.frame()
-
### Return object
# if(msgUser) message("\n", "Finished...")
- return(df_return)
+ return(df_agg)
}
diff --git a/FrEDI/R/get_sectorInfo.R b/FrEDI/R/get_sectorInfo.R
index d7eb5223..f3cf13aa 100644
--- a/FrEDI/R/get_sectorInfo.R
+++ b/FrEDI/R/get_sectorInfo.R
@@ -49,7 +49,8 @@ get_sectorInfo <- function(
if(is.null(gcmOnly )){gcmOnly <-F}
if(is.null(slrOnly )){slrOnly <-F}
# co_sectorsRef$sector_label
- assign("co_sectorsRef", rDataList[["co_sectors"]])
+ # assign("co_sectorsRef", rDataList[["co_sectors"]])
+ co_sectorsRef <- "co_sectorsRef" |> get_frediDataObj("frediData")
co_sectorsRef <- co_sectorsRef |>
select(-c("sector_id")) |>
diff --git a/FrEDI/R/import_inputs.R b/FrEDI/R/import_inputs.R
index 1ea0cc71..af4f49b1 100644
--- a/FrEDI/R/import_inputs.R
+++ b/FrEDI/R/import_inputs.R
@@ -81,7 +81,8 @@ import_inputs <- function(
popfile = NULL,
gdpfile = NULL,
temptype = "conus", ### "global", or "conus" (default)
- popform = "wide" ### "wide" or "long" ### Previously: "gather", "spread"
+ popform = "wide", ### "wide" or "long" ### Previously: "gather", "spread"
+ byState = FALSE
){
###### Messaging ######
hasAnyInputs <- list(tempfile, slrfile, popfile, gdpfile) |>
@@ -103,6 +104,7 @@ import_inputs <- function(
popform_default <- "wide"
popform <- ifelse(is.null(popform), popform_default, tolower(popform))
wide_pop <- popform=="wide"
+ if(byState){geo_msg <- " state..."} else{geo_msg <- " region..."}
### Set temperature type default and set temperature type to default if none
### is declared. Check whether inputs temperatures are already in CONUS degrees
@@ -113,7 +115,9 @@ import_inputs <- function(
###### Initialize Inputs List ######
### Get input scenario info: co_inputScenarioInfo
name_dfScenarioInfo <- "co_inputScenarioInfo"
- assign(name_dfScenarioInfo, rDataList[[name_dfScenarioInfo]])
+ assign(name_dfScenarioInfo, rDataList[["regionData"]][["data"]][[name_dfScenarioInfo]])
+ name_stateInfo <- "co_states"
+ assign(name_stateInfo, rDataList[["frediData"]][["data"]][[name_stateInfo]])
input_names_vector <- co_inputScenarioInfo$inputName
num_inputNames <- co_inputScenarioInfo |> nrow()
@@ -139,9 +143,16 @@ import_inputs <- function(
valueCol_i <- inputInfo_i$valueCol |> unique()
### Initialize column names
numCols_i <- colNames_i <- c("year", valueCol_i)
- ### Add region column
+ cols0 <- c("region")
+ statecols0 <- c("region", "state", "postal")
+ if(byState){
+ cols0 <- statecols0
+ colNames_i <- numCols_i <- c("year", "state_pop")
+ }
+
+ ### Add state/region columns as needed
if(region_i == 1){
- colNames_i <- c(colNames_i[1], "region", colNames_i[2])
+ colNames_i <- c(colNames_i[1], cols0, colNames_i[2])
}
###### Initialize Results List Element ######
@@ -174,16 +185,26 @@ import_inputs <- function(
###### Gather population inputs ######
if(input_i=="pop" & wide_pop){
msg3 |> paste0("User specified `popform='wide'`...") |>
- paste0("Gathering population by region...") |>
+ paste0("Gathering population by", geo_msg) |>
message()
names(df_input_i)[1] <- colNames_i[1]
- df_input_i <- df_input_i |> gather(key = "region", value="reg_pop", -year)
+ if(byState){
+ df_input_i <- df_input_i |> gather(key = "state", value="state_pop", -year)
+ } else{
+ df_input_i <- df_input_i |> gather(key = "region", value="reg_pop", -year)
+ }
}
###### Standardize All Columns ######
### Rename Inputs and Convert all columns to numeric
### Rename Inputs and Convert all columns to numeric
+ if(byState){
+ df_input_i <- df_input_i |>
+ left_join(co_states, by = c("state" = "state"), suffix = c("", ".y")) |>
+ select(colNames_i)
+ }
+
df_input_i <- df_input_i |>
rename_inputs(colNames_i) |>
mutate_all(as.character) |>
diff --git a/FrEDI/R/run_fredi.R b/FrEDI/R/run_fredi.R
index 59935840..d727a151 100644
--- a/FrEDI/R/run_fredi.R
+++ b/FrEDI/R/run_fredi.R
@@ -15,6 +15,7 @@
#' @param maxYear=2090 A numeric value indicating the maximum year for the analysis.
#' @param thru2300 A ` TRUE/FALSE` shortcut that overrides the maxYear argument to run the model to 2300.
#' @param outputList A ` TRUE/FALSE` value indicating whether to output results as a data frame object (`outputList=FALSE`, default) or to return a list of objects (`outputList=TRUE`) that includes information about model provenance (including input arguments and input scenarios) along with the data frame of results.
+#' @param byState A `TRUE/FALSE` value indicating whether to run at the state level for available sectors (default=`FALSE`).
#' @param silent A `TRUE/FALSE` value indicating the level of messaging desired by the user (default=`TRUE`).
#'
#'
@@ -142,27 +143,27 @@ run_fredi <- function(
inputsList = list(tempInput=NULL, slrInput=NULL, gdpInput=NULL, popInput=NULL), ### List of inputs
sectorList = NULL, ### Vector of sectors to get results for
aggLevels = c("national", "modelaverage", "impactyear", "impacttype"), ### Aggregation levels
- elasticity = 0.4, ### Override value for elasticity for economic values
+ # elasticity = 0.4, ### Override value for elasticity for economic values
+ elasticity = NULL, ### Override value for elasticity for economic values
maxYear = 2090,
thru2300 = FALSE,
outputList = FALSE, ### Whether to return input arguments as well as results. [If TRUE], returns a list instead of a data frame
+ # byState = FALSE, ### Whether to run at state level for available sectors
+ allCols = FALSE, ### Whether to include additional columns in output
silent = TRUE ### Whether to message the user
){
-
-
###### Set up the environment ######
### Level of messaging (default is to message the user)
- silent <- ifelse(is.null(silent), T, silent)
+ silent <- silent |> is.null() |> ifelse(T, silent)
msgUser <- !silent
- ### Uncomment for testing
- testing <- FALSE ### Whether to include scaled impact values
- doPrimary <- F ### whether to filter to primary impacts
+ ### Uncomment for allCols
+ doPrimary <- F ### whether to filter to primary impacts
### Model years and NPD (FrEDI past 2090)
refYear <- 2090
npdYear <- 2300
- maxYear <- ifelse(is.null(maxYear), refYear, maxYear)
- maxYear0 <- ifelse(thru2300, npdYear, maxYear)
- do_npd <- (maxYear0 > refYear)
+ maxYear <- maxYear |> is.null() |> ifelse(refYear, maxYear)
+ maxYear0 <- thru2300 |> ifelse(npdYear, maxYear)
+ do_npd <- maxYear0 > refYear
###### Return List ######
### Initialize list to return
@@ -171,9 +172,19 @@ run_fredi <- function(
###### Create file paths ######
### Assign data objects to objects in this namespace
- ### Configuration and data list
- for(i in 1:length(fredi_config)) assign(names(fredi_config)[i], fredi_config[[i]])
- for(i in 1:length(rDataList )) assign(names(rDataList)[i], rDataList[[i]])
+ for(list_i in rDataList){
+ ### Assign list elements to name space
+ name_i <- list_i[["name"]]
+ data_i <- list_i[["data"]]
+ ### Assign list elements to name space
+ name_i |> assign(data_i)
+ ### Assign elements of list i to name space
+ names_i <- data_i |> names()
+ for(name_j in names_i){name_j |> assign(data_i[[name_j]]); rm(name_j)}
+ rm(list_i, name_i, data_i)
+ } ### End for(list_i in rDataList)
+ ### Assign FrEDI config
+ for(name_i in fredi_config |> names()){assign(name_i, fredi_config[[name_i]]); rm(name_i)}
### Years
maxYear <- maxYear0
@@ -182,64 +193,80 @@ run_fredi <- function(
###### Aggregation level ######
### Types of summarization to do: default
- doPrimary <- ifelse(is.null(doPrimary), FALSE, doPrimary)
- aggList0 <- c("national", "modelaverage", "impactyear", "impacttype")
- if(!is.null(aggLevels)){
+ doPrimary <- doPrimary |> is.null() |> ifelse(FALSE, doPrimary)
+ aggList0 <- c("national", "modelaverage", "impactyear", "impacttype")
+ if(aggLevels |> is.null()){
+ aggLevels <- aggList0
+ } else{
### Aggregation levels
aggLevels <- aggLevels |> tolower()
aggLevels <- aggLevels[which(aggLevels %in% c(aggList0, "all", "none"))]
doAgg <- "none" %in% aggLevels
### If none specified, no aggregation (only SLR interpolation)
### Otherwise, aggregation depends on length of agg levels
- if ("none" %in% aggLevels) {aggLevels <- c() }
+ if ("none" %in% aggLevels) {aggLevels <- c()}
else if("all" %in% aggLevels) {aggLevels <- aggList0}
- else {requiresAgg <- length(aggLevels) > 0}
- } ### End if(!is.null(aggLevels))
- else{aggLevels <- aggList0}
- requiresAgg <- length(aggLevels) > 0
+ else {requiresAgg <- aggLevels |> length() > 0}
+ } ### End else(!is.null(aggLevels))
+ requiresAgg <- aggLevels |> length() > 0
###### Sectors List ######
- ### Sector names
- sector_names <- co_sectors$sector_id
- sector_labels <- co_sectors$sector_label
+ ### Sector names & labels
+ sectorIds <- co_sectors[["sector_id" ]]
+ sectorLbls <- co_sectors[["sector_label"]]
### Initialize sector list if the sectors list is null
- if(is.null(sectorList)){
- sectorList <- sector_names
- } ### End if(is.null(sectorList))
- else{
+ if(sectorList |> is.null()){
+ sectorList <- sectorIds
+ } else{
+ # sectorIds |> print()
+ ### Get lower case versions
+ sectors0 <- sectorIds |> tolower()
+ sectors1 <- sectorLbls |> tolower()
+ sectors2 <- sectorList |> tolower()
+ # sectors0 |> print(); sectors1 |> print(); sectors2 |> print()
### Compare inputs to names and labels in the data
### Subset to sector list in sector names
- which_sectors <- which(
- (tolower(sector_names) %in% tolower(sectorList)) |
- (tolower(sector_labels) %in% tolower(sectorList))
- )
- sectorList <- sector_names[which_sectors]
+ which0 <- (sectors0 %in% sectors2) | (sectors1 %in% sectors2)
+ sectorIds <- sectorIds [which0]
+ sectorList <- sectorLbls[which0]
### Message users about missing sectors
- which_notSectors <- which(!(
- (tolower(sectorList) %in% tolower(sector_names)) |
- (tolower(sectorList) %in% tolower(sector_labels))
- ) )
- missing_sectors <- sectorList[which_notSectors]
+ which1 <- (sectors2 %in% sectors0) | (sectors2 %in% sectors1)
+ na_sectors <- sectors2[!which1] ### use preserved user input; sectorList was re-assigned above
+ rm(sectors0, sectors1, sectors2, which0, which1)
### Message the user
- if(length(missing_sectors)>=1){
- message(
- "Warning! Error in `sectorList`.",
- "\n\t", "Impacts are not available for the following: '",
- "\n\t\t", paste(missing_sectors, collapse= "', '"), "'...",
- "\n\t", "Available sectors: '",
- "\n\t\t", paste(sector_names, collapse= "', '"), "'"
- ) ### End message
+ if(na_sectors |> length() >= 1){
+ paste0("Warning! Error in `sectorList`:") |> message()
+ "\n\t" |> paste0(
+ "Impacts are not available for the following sectors: '",
+ na_sectors |> paste(collapse= "', '"),
+ "'..."
+ ) |> message()
+ "\n\t" |> paste0(
+ "Available sectors: '",
+ sectorLbls |> paste(collapse= "', '"),
+ "'."
+ ) |> message() ### End message
} ### End if(length(missing_sectors)>=1)
} ### End else(is.null(sectorList))
### Number of sectors
+ # sectorList |> print()
num_sectors <- sectorList |> length()
- ###### Load Inputs ######
+ ### Filter to sectors
+ co_slrScalars <- co_slrScalars |> filter(sector %in% sectorIds)
+ df_sectorsInfo <- df_sectorsInfo |> filter(sector %in% sectorIds)
+
+ ###### By State ######
+ byState <- TRUE
+ popCol0 <- byState |> ifelse("state_pop", "reg_pop")
+ if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+
+ ###### Inputs ######
+ ###### ** Load Inputs ######
### Create logicals and initialize inputs list
- list_inputs <- co_inputScenarioInfo$inputName
+ list_inputs <- co_inputScenarioInfo[["inputName"]]
num_inputNames <- co_inputScenarioInfo |> nrow()
-
- if(is.null(inputsList)) {inputsList <- list()}else{message("Checking input values...")}
+ if(inputsList |> is.null()) {inputsList <- list()} else{"Checking input values..." |> message()}
### Iterate over the input list
# if(!is.null(inputsList)){
### Assign inputs to objects
@@ -253,25 +280,30 @@ run_fredi <- function(
### Min and Max Values
min_i <- inputInfo_i$inputMin |> unique()
max_i <- inputInfo_i$inputMax |> unique()
- ###### Column Info ######
+ ### Column Info
region_i <- inputInfo_i$region |> unique()
valueCol_i <- inputInfo_i$valueCol |> unique()
+ if(region_i == 1){regCol_i <- c("region")} else{regCol_i <- c()}
### Initialize column names
- numCols_i <- colNames_i <- c("year", valueCol_i) #; print(colNames_i)
- ### Add region column
- if(region_i == 1){
- colNames_i <- c(colNames_i[1], "region", colNames_i[2])
- }
+ colNames_i <- "year" |> c(regCol_i) |> c(valueCol_i) #; print(colNames_i)
+ numCols_i <- colNames_i |> length()
has_i <- paste0("has_", input_i, "Update")
# has_update_i <- is.null(inputsList[[inputName_i]])
df_input_i <- inputsList[[inputName_i]]
- has_update_i <- !is.null(df_input_i)
- ###### Assign inputs to objects ######
- assign(has_i, has_update_i)
- assign(inputName_i, df_input_i)
+ has_update_i <- !(df_input_i |> is.null())
+ ### Assign inputs to objects
+ has_i |> assign(has_update_i)
+ inputName_i |> assign(df_input_i )
} ### End iterate over inputs
# }
+ ### Message user about elasticity
+ if(!(elasticity |> is.null()) && !(elasticity |> is.numeric())){
+ paste0("\t", "Incorrect value type provided for argument 'elasticity'...") |> message()
+ paste0("\t\t", "Using default elasticity values.") |> message(); elasticity <- NULL
+ } ### End if
+
+
### Update arguments list
argsList[["sectorList"]] <- sectorList
argsList[["aggLevels" ]] <- aggLevels
@@ -279,10 +311,10 @@ run_fredi <- function(
argsList[["maxYear" ]] <- maxYear
argsList[["thru2300" ]] <- thru2300
### Add to return list and remove intermediate arguments
- returnList[["arguments" ]] <- argsList
+ returnList[["arguments"]] <- argsList
rm("argsList")
- ###### Temperature Scenario ######
+ ###### ** Temperature Scenario ######
### User inputs: temperatures have already been converted to CONUS temperatures. Filter to desired range.
### Name the reference year temperature
### Add the point where impacts are zero
@@ -296,9 +328,9 @@ run_fredi <- function(
### Remove missing values of years, temperatures
### Zero out series at the temperature reference year
tempInput <- tempInput |> select(c("year", "temp_C"))
- tempInput <- tempInput |> filter(!is.na(temp_C) & !(is.na(year)))
+ tempInput <- tempInput |> filter(!(temp_C |> is.na()) & !(year |> is.na()))
tempInput <- tempInput |> filter(year > refYear_temp)
- tempInput <- data.frame(year= refYear_temp, temp_C = 0) |> rbind(tempInput)
+ tempInput <- tibble(year= refYear_temp, temp_C = 0) |> rbind(tempInput)
### Interpolate annual values and then drop region
temp_df <- tempInput |> (function(x){
@@ -324,7 +356,7 @@ run_fredi <- function(
} ### End else(has_tempUpdate)
# temp_df |> nrow() |> print()
- ###### SLR Scenario ######
+ ###### ** SLR Scenario ######
### Year where SLR impacts are zero
refYear_slr <- (co_modelTypes |> filter(modelUnitType=="slr"))$modelRefYear |> unique()
# co_modelTypes |> names() |> print()
@@ -336,15 +368,15 @@ run_fredi <- function(
if(has_slrUpdate){
message("Creating SLR scenario from user inputs...")
slrInput <- slrInput |> select(c("year", "slr_cm"))
- slrInput <- slrInput |> filter(!is.na(slr_cm) & !is.na(year))
+ slrInput <- slrInput |> filter(!(slr_cm |> is.na()) & !(year |> is.na()))
slrInput <- slrInput |> filter(year > refYear_slr)
- slrInput <- data.frame(year= refYear_slr, slr_cm = 0) |> rbind(slrInput)
+ slrInput <- tibble(year= refYear_slr, slr_cm = 0) |> rbind(slrInput)
### Interpolate values
slr_df <- slrInput |> (function(x){
minYear_x <- x$year |> min()
interpYrs <- refYear_slr:maxYear
### Interpolate annual values
- x_interp <- x |> interpolate_annual(
+ x_interp <- x |> interpolate_annual(#wm same as temps above, I think fine to leave
years = interpYrs,
column = "slr_cm",
rule = 1:2
@@ -358,49 +390,54 @@ run_fredi <- function(
### Then convert global temps to SLR
else{
message("Creating SLR scenario from temperature scenario...")
- slr_df <- temp_df |> (function(x){temps2slr(temps = x$temp_C_global, years = x$year)})()
+ # slr_df <- temp_df %>% (function(x){temps2slr(temps = x$temp_C_global, years = x$year)})
+ slr_df <- temps2slr(temps = temp_df$temp_C_global, years = temp_df$year)
} ### End else(has_slrUpdate)
# slr_df |> nrow() |> print()
# slr_df |> head() |> print()
# slr_df$year |> range() |> print()
- ###### Driver Scenario ######
- ### Format the temperature and SLR values
- temp_df <- temp_df |> select(c("year", "temp_C_conus")) |>
- rename(modelUnitValue = temp_C_conus) |> mutate(modelType="gcm")
- slr_df <- slr_df |> select(c("year", "slr_cm")) |>
- rename(modelUnitValue=slr_cm) |> mutate(modelType="slr")
-
+ ###### ** Driver Scenario ######
+ ### Select columns
+ temp_df <- temp_df |> select(c("year", "temp_C_conus"))
+ slr_df <- slr_df |> select(c("year", "slr_cm"))
+ ### Rename columns
+ temp_df <- temp_df |> rename(modelUnitValue = temp_C_conus)
+ slr_df <- slr_df |> rename(modelUnitValue = slr_cm )
+ ### Add model type
+ temp_df <- temp_df |> mutate(modelType="gcm")
+ slr_df <- slr_df |> mutate(modelType="slr")
###### Combine Scenarios and bind with the model type info
### R Bind the SLR values
### Join with info about models
### Filter to the years used by the R tool
co_modelTypes <- co_modelTypes |> rename(modelType = modelType_id)
co_modelType0 <- co_modelTypes |> select(c("modelType"))
- df_drivers <- temp_df |> rbind(slr_df)
- df_drivers <- df_drivers |> filter( year >= minYear) |> filter(year <= maxYear)
+ df_drivers <- temp_df |> rbind(slr_df)
+ df_drivers <- df_drivers |> filter(year >= minYear)
+ df_drivers <- df_drivers |> filter(year <= maxYear)
# df_drivers |> names() |> print(); co_modelType0 |> names() |> print()
- df_drivers <- df_drivers |> left_join(co_modelType0, by = "modelType")
+ df_drivers <- df_drivers |> left_join(co_modelType0, by="modelType")
### Update inputs in outputs list
returnList[["driverScenarios"]][["temp"]] <- temp_df
returnList[["driverScenarios"]][["slr" ]] <- slr_df
### Remove intermediate values
rm("co_modelType0", "temp_df", "slr_df")
- ###### Socioeconomic Scenario ######
+ ###### ** Socioeconomic Scenario ######
### Update the socioeconomic scenario with any GDP or Population inputs and message the user
### Reformat GDP inputs if provided, or use defaults
gdpCols0 <- c("year", "gdp_usd")
- popCols0 <- c("year", "region", "reg_pop")
+ popCols0 <- c("year", "region") |> c(stateCols0, popCol0)
if(has_gdpUpdate){
message("Creating GDP scenario from user inputs...")
- gdp_df <- gdpInput |> filter(!is.na(gdp_usd)) |> filter(!is.na(year))
- gdp_df <- gdp_df |> interpolate_annual(years= c(list_years), column = "gdp_usd", rule = 2) |> select(-c("region"))
+ gdp_df <- gdpInput |> filter(!(is.na(gdp_usd)) & !(year |> is.na()))
+ gdp_df <- gdp_df |> interpolate_annual(years=list_years, column="gdp_usd", rule = 2) |> select(-c("region"))
rm("gdpInput")
} ### End if(has_gdpUpdate)
else{
message("No GDP scenario provided...Using default GDP scenario...")
- gdp_df <- gdp_default |> select(c(all_of(gdpCols0)))
+ gdp_df <- gdp_default |> select(all_of(gdpCols0))
rm("gdp_default")
} ### End else(has_gdpUpdate)
@@ -408,32 +445,32 @@ run_fredi <- function(
if(has_popUpdate){
message("Creating Population scenario from user inputs...")
### Standardize region and then interpolate
- pop_df <- popInput |> mutate(region = gsub(" ", ".", region))
- pop_df <- pop_df |> interpolate_annual(years= c(list_years), column = "reg_pop", rule = 2) |> ungroup()
+ pop_df <- popInput |> mutate(region = gsub(" ", ".", region))
+ pop_df <- pop_df |> interpolate_annual(years=list_years, column=popCol0, rule=2, byState=byState) |> ungroup()
# pop_df |> glimpse()
### Calculate national population
- national_pop <- pop_df |> group_by_at(.vars=c("year")) |> summarize_at(.vars=c("reg_pop"), sum, na.rm=T) |> ungroup()
- national_pop <- national_pop |> rename(national_pop = reg_pop)
+ national_pop <- pop_df |> group_by_at(.vars=c("year")) |> summarize_at(.vars=c(popCol0), sum, na.rm=T) |> ungroup()
+ national_pop <- national_pop |> rename_at(vars(popCol0), ~"national_pop")
# national_pop |> glimpse()
rm("popInput")
} ### if(has_popUpdate)
else{
message("Creating Population scenario from defaults...")
### Select columns and filter
- pop_df <- pop_default |> select(c(all_of(popCols0)))
+ pop_df <- pop_default |> select(all_of(popCols0))
national_pop <- national_pop_default |> select("year", "national_pop")
rm("pop_default", "national_pop_default")
} ### End else(has_popUpdate)
### Message user
if(has_gdpUpdate|has_popUpdate){if(msgUser){messages_data[["updatePopGDP"]]}}
### Filter to correct years
- gdp_df <- gdp_df |> filter(year >= minYear) |> filter(year <= maxYear)
- pop_df <- pop_df |> filter(year >= minYear) |> filter(year <= maxYear)
- national_pop <- national_pop |> filter(year >= minYear) |> filter(year <= maxYear)
+ gdp_df <- gdp_df |> filter(year >= minYear & year <= maxYear)
+ pop_df <- pop_df |> filter(year >= minYear & year <= maxYear)
+ national_pop <- national_pop |> filter(year >= minYear & year <= maxYear)
### National scenario
# gdp_df |> glimpse(); national_pop |> glimpse();
national_scenario <- gdp_df |> left_join(national_pop, by=c("year"))
- national_scenario <- national_scenario |> mutate(gdp_percap = gdp_usd/national_pop)
+ national_scenario <- national_scenario |> mutate(gdp_percap = gdp_usd / national_pop)
### Update inputs in outputs list
returnList[["driverScenarios"]][["gdp"]] <- gdp_df
returnList[["driverScenarios"]][["pop"]] <- pop_df
@@ -441,475 +478,296 @@ run_fredi <- function(
rm("gdp_df", "national_pop")
### Updated scenario
- updatedScenario <- national_scenario |> left_join(pop_df, by=c("year"))
- updatedScenario <- updatedScenario |> arrange_at(.vars=c("region", "year"))
- # updatedScenario |> group_by_at(.vars=c("region", "year")) |> summarize(n=n(), .groups="keep") |> ungroup() |> filter(n>1) |> nrow() |> print()
+ join0 <- "year"
+ arrange0 <- "region" |> c(stateCols0) |> c(join0)
+ updatedScenario <- national_scenario |> left_join(pop_df, by=join0)
+ updatedScenario <- updatedScenario |> arrange_at(c(arrange0))
+ rm(join0, arrange0)
###### Update Scalars ######
if(msgUser) message("", list_messages[["updateScalars"]]$try, "")
- # mutateCols0 <- c("scalarName", "scalarType", "national_or_regional")
- # mutateVals0 <- c("reg_pop", "physScalar", "regional")
### Filter main scalars to correct years and filter out regional population
- ### Join with regPopScalar
df_mainScalars <- df_mainScalars |> filter(year >= minYear) |> filter(year <= maxYear)
- df_mainScalars <- df_mainScalars |> filter(scalarName!="reg_pop")
- df_mainScalars <- df_mainScalars |> (function(df0, pop0 = pop_df, popCols = popCols0){
- ### Format population
- pop0 <- pop0 |> select(c(all_of(popCols)))
- pop0 <- pop0 |> rename(value=reg_pop)
- pop0 <- pop0 |> mutate(scalarName = "reg_pop")
- pop0 <- pop0 |> mutate(scalarType = "physScalar")
- pop0 <- pop0 |> mutate(national_or_regional = "regional")
- ### Bind regional population with other scalars
- df0 <- df0 |> rbind(pop0)
- return(df0)
- })()
-
- ###### NPD Scalars ######
- if(do_npd){
- ###### Scalars for SLR past 2090 ######
- ### Scalars for SLR
- # slr_sectors <- c("CoastalProperties", "HTF")
- co_npdScalars <- data.frame(sector = c("CoastalProperties", "HTF")) |>
- mutate(npd_scalarType = c("gdp_percap", "gdp_usd")) |>
- mutate(c1 = c(1, 0.1625)) |>
- mutate(exp0 = c(ifelse(is.null(elasticity), 0.45, elasticity), 1)) |>
- mutate(c2 = c(0, 0.8375))
-
- ### Columns
- select0 <- c("year", "gdp_usd", "gdp_percap")
- gather0 <- c("gdp_usd", "gdp_percap")
- ### Calculate scalars and gather scalars
- npdScalars <- national_scenario |> filter(year >= refYear)
- npdScalars <- npdScalars |> select(c(all_of(select0)))
- npdScalars <- npdScalars |> gather(key = "npd_scalarType", value="npd_scalarValue", c(all_of(gather0)))
- rm("select0", "gather0")
- ### Get 2090 values and then bind them
- npdScalars <- npdScalars |> (function(npd0, co_npd = co_npdScalars){
- #### Columns
- join0 <- c("npd_scalarType")
- ### Filter to year and drop year column, rename scalar value
- npd1 <- npd0 |> filter(year == refYear) |> select(-c("year"))
- npd1 <- npd1 |> rename(npd_scalarValueRef = npd_scalarValue)
- ### Join with scalar and sector info
- npd0 <- npd0 |> left_join(npd1, by = c(all_of(join0)))
- npd0 <- npd0 |> left_join(co_npd, by = c(all_of(join0)))
- return(npd0)
- })()
- ### Calculate scalar value and drop scalar columns
- select0 <- c("c1", "exp0", "npd_scalarValueRef")
- npdScalars <- npdScalars |> mutate(npd_scalarValue = c1 * (npd_scalarValue / npd_scalarValueRef)**exp0)
- npdScalars <- npdScalars |> select(-c(all_of(select0)))
- rm("select0")
- # (npdScalars |> filter(year > 2090))$year |> head() |> print()
- ### Join with regional population
- npdScalars <- npdScalars |> (function(npd0, pop0 = pop_df, refYear0 = refYear){
- ### Columns
- join0 <- c("year", "region", "reg_pop")
- ### Separate population
- pop1 <- pop0 |> filter(year >= refYear0) |> select(c(all_of(join0)))
- pop2 <- pop0 |> filter(year == refYear0) |> select(c(join0[2:3])) |> rename(reg_popRef = reg_pop)
- ### Join by year
- npd0 <- npd0 |> left_join(pop1, by = c("year"))
- ### Join by region
- npd0 <- npd0 |> left_join(pop2, by = c("region"))
- ### Return
- return(npd0)
- })()
- ### Calculate value and rename values
- npdScalars <- npdScalars |> mutate(npd_scalarValue = npd_scalarValue + c2 * (reg_pop / reg_popRef))
- npdScalars <- npdScalars |> select(-c("c2", "reg_pop", "reg_popRef"))
- ### Rename values
- npdScalars <- npdScalars |> mutate(econScalar = npd_scalarValue)
- npdScalars <- npdScalars |> mutate(physEconScalar = npd_scalarValue)
- npdScalars <- npdScalars |> select(-c("npd_scalarValue", "npd_scalarType"))
- npdScalars <- npdScalars |> filter(year > refYear)
- }
+ # df_mainScalars |> glimpse();
+ df_mainScalars <- df_mainScalars |> update_popScalars(updatedScenario)
+ # df_mainScalars1 |> glimpse();
+ # ### Message the user
+ # if(msgUser) {paste0("\t", messages_data[["calcScalars"]]$success) |> message()}
+ # # ### Message the user
+ # # if(msgUser) message("\t", list_messages[["updateScalars"]]$success)
+ # "got here" |> print()
###### Initialize Results ######
- ### Filter initial results to specified sectors
- ### Join to the updated base scenario
+ ### Initialized results: Join sector info and default scenario
### Calculate physical scalars and economic multipliers then calculate scalars
- if(!is.null(elasticity)){if(!is.numeric(elasticity)){
- message("\t", "Incorrect value type provided for argument 'elasticity'...")
- message("\t\t", "Using default elasticity values.")
- }}
-
- initialResults <- df_results0 |> filter(year >= minYear) |> filter(year <= maxYear)
- initialResults <- initialResults |> filter(sector %in% sectorList)
- rm("df_results0")
- # paste0("Initial Results: ", nrow(initialResults)) |> print(); initialResults |> glimpse()
- # updatedScenario |> glimpse()
-
- ### Update scalar values
- # initialResults$region |> unique() |> print(); updatedScenario$region |> unique() |> print()
- initialResults <- initialResults |> left_join(updatedScenario, by = c("year", "region"))
- initialResults <- initialResults |> match_scalarValues(df_mainScalars, scalarType="physScalar")
- initialResults <- initialResults |> get_econAdjValues(scenario = updatedScenario, multipliers=co_econMultipliers[,1])
- initialResults <- initialResults |> calcScalars(elasticity = elasticity)
- rm("df_mainScalars", "updatedScenario") ### df_mainScalars no longer needed
- # paste0("Initial Results: ", nrow(initialResults)) |> print(); initialResults |> head() |> glimpse()
-
- ###### Initialize Results for NPD ######
- if(do_npd){
- ### Get initial results for NPD
- initialResults_npd <- initialResults |> filter((sector %in% co_npdScalars$sector & year > refYear))
- initialResults_npd <- initialResults_npd |> select(-c("econScalar", "physEconScalar"))
- initialResults_npd <- initialResults_npd |> left_join(npdScalars, by = c("sector", "year", "region"));
- # ### Adjust NPD scalars
- initialResults <- initialResults |> filter(!(sector %in% co_npdScalars$sector & year > refYear))
- initialResults <- initialResults |> rbind(initialResults_npd)
- rm("initialResults_npd", "co_npdScalars", "npdScalars")
- }
- ### Message the user
- if(msgUser) message("\t", list_messages[["updateScalars"]]$success)
-
-
- ###### Scenario ID ######
+ # df_sectorsInfo |> glimpse(); #df_mainScalars |> glimpse(); df_mainScalars1 |> glimpse()
+ df_results0 <- updatedScenario |> initialize_resultsDf(
+ df_info = df_sectorsInfo,
+ df_scalars = df_mainScalars,
+ elasticity = elasticity
+ )
+ # df_mainScalars1 |> glimpse();
+ ### Filter to years
+ df_results0 <- df_results0 |> filter(year >= minYear & year <= maxYear)
+ # df_results0 <- df_results0 |> mutate(model_type=modelType)
+
+ ##### Scenario ID ######
+ ### Mutate model for SLR sectors
+ # df_results0 |> glimpse()
+ modelCols0 <- c("model_id", "model_dot", "model_underscore", "model_label")
+ co_models0 <- "co_models" |> get_frediDataObj("frediData")
+ ### Change model to interpolation for SLR models
+ group0 <- co_models0 |> names()
+ which_slr <- (co_models0[["modelType"]] |> tolower() == "slr") |> which()
+ co_models0[which_slr,modelCols0] <- "Interpolation"
+ co_models0 <- co_models0 |>
+ group_by_at(c(group0)) |>
+ summarize(n=n(), .groups="keep") |> ungroup() |>
+ select(-c("n"))
+ rm(group0, which_slr, modelCols0)
+ # "got here" |> print()
+ ### Join with initial results
+ join0 <- c("modelType")
+ df_results0 <- df_results0 |> left_join(co_models0, by=c(join0))
+ # df_results0 |> glimpse()
+ rm(join0, co_models0)
+ # return(df_results0)
+
### Create scenario ID and separate by model type
- initialResults <- initialResults |> mutate(model_type=modelType)
- initialResults_slr <- initialResults |> filter(modelType=="slr")
- initialResults <- initialResults |> filter(modelType!="slr")
- ### Number of GCM and SLR rows
- nrow_gcm <- initialResults |> nrow()
- nrow_slr <- initialResults_slr |> nrow()
+ include0 <- c("region") |> c(stateCols0) |> c("model_label")
+ df_results0 <- df_results0 |> get_scenario_id(include = include0)
###### Scaled Impacts ######
### Initialize and empty data frame df_scenarioResults
if(msgUser) message(list_messages[["scaledImpacts"]]$try)
if(msgUser) message("Calculating scaled impacts...")
- df_scenarioResults <- data.frame()
- impactSelectCols <- c("year", "scaled_impacts", "scenario_id")
+ df_scenarioResults <- tibble()
+ # df_results0 <- df_results0 |> mutate(model_type=modelType)
+ df_results0_gcm <- df_results0 |> filter(modelType!="slr")
+ df_results0_slr <- df_results0 |> filter(modelType=="slr")
+ rm(df_results0)
+
+ ### Number of GCM and SLR rows
+ nrow_gcm <- df_results0_gcm |> nrow()
+ nrow_slr <- df_results0_slr |> nrow()
+ # nrow_gcm |> c(nrow_slr) |> print()
- ###### GCM Scaled Impacts ######
+ ###### ** GCM Scaled Impacts ######
if(nrow_gcm){
- ### Drivers
- df_drivers_gcm <- df_drivers |> filter(modelType == "gcm")
- ### Get scenario id
- # initialResults_slr <- initialResults_slr |> get_scenario_id(include=c())
- initialResults <- initialResults |> left_join(co_models, by="modelType")
- initialResults <- initialResults |> get_scenario_id(include=c("model_dot", "region"))
- initialResults <- initialResults |> select(-c("model_type"))
- ### Get list of unique impact functions
- impFunNames <- list_impactFunctions |> names() |> unique()
- ### Check whether the scenario has an impact function (scenarios with all missing values have no functions)
- gcmAllFuncs <- initialResults$scenario_id |> unique()
- df_gcm_i <- data.frame(scenario_id = gcmAllFuncs)
- df_gcm_i <- df_gcm_i |> mutate(hasScenario = (scenario_id %in% impFunNames)*1)
- ### Figure out which have functions
- which_hasFunc <- which(df_gcm_i$hasScenario==1)
- gcmHasFuns <- length(which_hasFunc)>=1
- gcmNoFuns <- !(length(gcmAllFuncs) == length(which_hasFunc))
- # impFunNames[1:5] |> print(); gcmAllFuncs[1:5] |> print(); which_hasFunc |> head() |> print()
-
- ### Get impacts for scenario_ids that have functions
- if(gcmHasFuns){
- hasFunNames <- df_gcm_i[which_hasFunc, "scenario_id"] |> unique()
- hasFunsList <- list_impactFunctions[which(impFunNames %in% hasFunNames)]
- ### Get impacts
- imp_hasFuns <- hasFunsList |> interpolate_impacts(xVar = df_drivers_gcm$modelUnitValue, years = df_drivers_gcm$year)
- imp_hasFuns <- imp_hasFuns |> rename(modelUnitValue = xVar) |> filter(year>=minYear)
- imp_hasFuns <- imp_hasFuns |> select(c(all_of(impactSelectCols)))
- # df_scenarioResults |> names() |> print()
- df_scenarioResults <- df_scenarioResults |> rbind(imp_hasFuns)
- rm("hasFunNames", "hasFunsList", "imp_hasFuns")
- } #; return(df_i)
- if(gcmNoFuns){
- imp_noFuns <- df_gcm_i[-which_hasFunc,]
- imp_noFuns <- imp_noFuns |> mutate(scaled_impacts = NA, joinCol = 1)
- imp_noFuns <- imp_noFuns |> left_join(df_drivers_gcm |> mutate(joinCol = 1), by=c("joinCol"))
- imp_noFuns <- imp_noFuns |> select(c(all_of(impactSelectCols)))
- # df_scenarioResults |> names() |> print(); imp_noFuns |> names() |> print()
- df_scenarioResults <- df_scenarioResults |> rbind(imp_noFuns)
- rm("imp_noFuns")
- }
- rm("df_drivers_gcm", "gcmAllFuncs", "which_hasFunc", "gcmHasFuns", "gcmNoFuns")
- }
-
- ###### SLR Scaled Impacts ######
+ # df_results0_gcm |> glimpse()
+ df_gcm0 <- df_results0_gcm |> get_gcmScaledImpacts(df1=df_drivers)
+ df_scenarioResults <- df_scenarioResults |> rbind(df_gcm0)
+ # # df_scenarioResults |> glimpse()
+ # df_results0_gcm$scenario_id |> unique() |> head() |> print()
+ # (df_gcm0 |> filter(!is.na(scaled_impacts)))$scenario_id |> unique() |> head() |> print()
+ # df_scenarioResults$scenario_id |> unique() |> head() |> print()
+ rm(df_gcm0)
+ } ### End if(nrow_gcm)
+ # df_scenarioResults |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
+ ###### ** SLR Scaled Impacts ######
if(nrow_slr){
# "got here1" |> print()
- ### Filter to appropriate number of years
- slrImpacts <- slrImpacts |> filter(year <= maxYear)
- slrExtremes <- slrExtremes |> filter(year <= maxYear)
- slrDrivers <- df_drivers |> filter(modelType=="slr") #|> rename(model_type=modelType)
-
- ###### ** SLR Scaled Impacts Above Max #######
- ### Examine driver values: combine with extremeSs
- slrMax <- (co_modelTypes |> filter(modelType=="slr"))$modelMaxOutput[1]
- ### Combine SLR with extremes and filter to appropriate years
- df_slrMax <- slrDrivers
- df_slrMax <- df_slrMax |> left_join(slrExtremes, by=c("year"))
- df_slrMax <- df_slrMax |> filter(modelUnitValue >= driverValue_ref)
- slrMaxYears <- df_slrMax |> get_uniqueValues(column="year")
- ### Calculate scaled impacts for values > slrMax
- df_slrMax <- df_slrMax |> mutate(deltaDriver = modelUnitValue - driverValue_ref)
- df_slrMax <- df_slrMax |> mutate(scaled_impacts = impacts_intercept + impacts_slope * deltaDriver)
- # df_slrMax |> filter(deltaDriver < 0) |> nrow() |> print()
-
- ###### ** SLR Other Scaled Impacts #######
- ### Get impacts and create scenario ID for values <= slrMax
- df_slrImpacts <- slrDrivers |> filter(!(year %in% slrMaxYears))
- df_slrImpacts <- df_slrImpacts |> left_join(slrImpacts, by=c("year"))
- # "got here2" |> print()
-
- ###### ** SLR Interpolation ######
- nrow_oth <- df_slrImpacts |> nrow()
- # nrow_oth |> print()
- if(nrow_oth){
- ### Group by cols
- cols0 <- c("modelType", "modelUnitValue")
- cols1 <- c("model_type", "driverValue")
- cols2 <- c("lower_model", "upper_model")
- ### Group by cols
- slr_names <- df_slrImpacts |> names()
- slrGroupByCols <- c("sector", "variant", "impactYear", "impactType", "model", "model_dot", "region", "scenario_id")
- slrGroupByCols <- slrGroupByCols[which(slrGroupByCols %in% slr_names)] |> c(cols1)
- # rm("slrGroupByCols", "slr_names")
- #### Interpolate driver values
- slrDrivers <- slrDrivers |> rename_at(.vars=c(all_of(cols0)), ~cols1)
- slrScenario <- slrDrivers |> filter(tolower(model_type)=="slr") |> select(-c("model_type"))
- # "got here3" |> print()
- slrScenario <- slrScenario |> slr_Interp_byYear()
- slrScenario <- slrScenario |> mutate_at(.vars=c(all_of(cols2)), function(y){gsub(" ", "", y)})
- # "got here4" |> print()
- # df_slrImpacts$model |> unique() |> print()
- # return(slrScenario)
- # df_slrImpacts |> names() |> print(); slrScenario |> names() |> print()
- ### Interpolate
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- df_slrImpacts <- df_slrImpacts |> rename_at(.vars=c(all_of(cols0[2])), ~cols1[2])
- # "got here5" |> print()
- df_slrImpacts <- df_slrImpacts |> fredi_slrInterp(slr_x = slrScenario, groupByCols=slrGroupByCols)
- # "got here6" |> print()
- # # "got here" |> print()
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- # # df_slrImpacts |> names() |> print()
-
- df_slrImpacts <- df_slrImpacts |> rename_at(.vars=c(all_of(cols1[2])), ~cols0[2])
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- # df_slrImpacts |> names() |> print()
- rm("slrGroupByCols", "slr_names", "slrScenario")
- rm("cols0", "cols1")
- }
- rm("nrow_oth")
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
-
- ### Get scenario ID and adjust the model value
- # df_slrMax |> names() |> print()
- df_slrMax <- df_slrMax |> mutate(model_dot = "Interpolation")
- df_slrImpacts <- df_slrImpacts |> mutate(model_dot = "Interpolation")
- ### Scenario ID
- df_slrMax <- df_slrMax |> get_scenario_id(include=c("model_dot", "region"))
- df_slrImpacts <- df_slrImpacts |> get_scenario_id(include=c("model_dot", "region"))
- # df_slrMax$scenario_id |> unique() |> head() |> print()
- # ### Check names
- # df_slrMax |> names() |> print()
- # check_max_ids <- df_slrMax$scenario_id; # check_max_ids |> head() |> print(); rm("check_max_ids)
-
-
- ###### ** SLR Join Scaled Impacts ######
- ### Add other results back in
- # "got here7" |> print()
- df_slrMax <- df_slrMax |> select(c(all_of(impactSelectCols)))
- df_slrImpacts <- df_slrImpacts |> select(c(all_of(impactSelectCols)))
- df_slrMax_ids <- df_slrMax$scenario_id |> unique() |> sort()
- df_slrImp_ids <- df_slrImpacts$scenario_id |> unique() |> sort()
- # df_slrMax_ids |> head() |> print(); df_slrImp_ids |> head() |> print()
-
- # slrMaxYears |> length() |> print()
- # df_slrMax |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- # "got here8" |> print()
-
- df_slrImpacts <- df_slrImpacts |> rbind(df_slrMax)
- df_slrImpacts <- df_slrImpacts |> arrange_at(.vars=c("scenario_id", "year"))
- df_slrImpacts <- df_slrImpacts |> filter(!is.na(scaled_impacts))
- # df_slrImpacts <- df_slrImpacts |> mutate(across("scenario_id",str_replace, '(\\d)[0-9]{1,3}cm', '\\Interpolation'))
- rm("df_slrMax")
- # df_slrImpacts |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
-
- ### Bind with other results
- # df_scenarioResults |> names() |> print(); df_slrImpacts$scenario_id |> head() |> print()
- df_scenarioResults <- df_scenarioResults |> rbind(df_slrImpacts)
- rm("df_slrImpacts")
- # "got here9" |> print()
-
- ###### ** SLR Initial Results #######
- ### Separate initial results out
- # # initialResults_slr <- initialResults_slr |> mutate(scenario_id = scenario_id)
- # initialResults_slr |> names() |> print()
- ### Join with model type or model
- ### Add additional columns and get scenario ID
- initialResults_slr <- initialResults_slr |> left_join(co_modelTypes, by="modelType")
- modelCols0 <- c("model_id", "model_dot", "model_underscore", "model_label")
- initialResults_slr[,modelCols0] <- "Interpolation"
- initialResults_slr <- initialResults_slr |> get_scenario_id(include=c("model_dot", "region"))
- rm("modelCols0")
-
- # "got here" |> print()
- in_slrImp_ids <- initialResults_slr$scenario_id |> unique() |> sort()
- # in_slrImp_ids |> head() |> print()
- check_inMax_ids <- df_slrMax_ids[!(df_slrMax_ids %in% in_slrImp_ids)];
- check_inSlr_ids <- df_slrImp_ids[!(df_slrImp_ids %in% in_slrImp_ids)];
- # check_inMax_ids |> head() |> print(); check_inSlr_ids |> head() |> print()
-
- # # df_impacts |> names() |> print()
- # # initialResults_slr$scenario_id |> head() |> print()
- # initialResults_slr |> filter(!is.na(econScalarValue)) |> nrow() |> print()
-
- ###### ** SLR Bind Initial Results #######
- ### Arrange and bind with other results
- initialResults_slr <- initialResults_slr |> select(-c("model_type"))
- initialResults_slr <- initialResults_slr |> arrange_at(.vars=c("scenario_id", "year"))
-
- # names1 <- initialResults_slr |> names(); names2 <- initialResults |> names(); # names1[!(names1 %in% names2)] |> print()
- # initialResults_slr |> names() |> print(); initialResults |> names() |> print()
- initialResults <- initialResults |> rbind(initialResults_slr)
- rm("initialResults_slr")
- }
- rm("impactSelectCols")
+ df_slr0 <- df_results0_slr |> get_slrScaledImpacts(df1=df_drivers)
+ df_scenarioResults <- df_scenarioResults |> rbind(df_slr0)
+ rm(df_slr0)
+ } ### End if(nrow_slr)
+
+ ###### ** Format Scaled Impacts ######
+ ### Drop columns
+ # df_results0 |> glimpse() |> print()
+ drop0 <- c("modelUnitValue")
+ df_scenarioResults <- df_scenarioResults |> select(-all_of(drop0))
+ # df_scenarioResults |> names() |> print()
+ rm(drop0)
+ ### Message user
+ if(msgUser) message("\t", list_messages[["scaledImpacts"]]$success)
###### Calculate Impacts ######
### Join results with initialized results and update missing observations with NA
- ### Remove intermediate values
- # initialResults |> names |> print(); df_scenarioResults |> names() |> print()
- df_impacts <- initialResults |> left_join(df_scenarioResults, by=c("scenario_id", "year"));
- rm("initialResults")
- if(msgUser) message("\t", list_messages[["scaledImpacts"]]$success)
+ ### Drop columns, then join with scenario results
+ # df_results0_gcm |> names() |> print(); df_results0_slr |> names() |> print()
+ join0 <- c("scenario_id", "year")
+ df_results0 <- df_results0_gcm |> rbind(df_results0_slr)
+ # df_results0 |> names() |> print()
+ # return(df_results0)
+ # df_results0 |> names() |> print(); df_scenarioResults |> names() |> print()
+ # df_results0 |> glimpse(); df_scenarioResults |> glimpse()
+ # df_results0$scenario_id |> unique() |> head() |> print();
+ # df_scenarioResults$scenario_id |> unique() |> head() |> print()
+ df_impacts <- df_results0 |> left_join(df_scenarioResults, by=c(join0));
+ # df_impacts |> glimpse()
+ rm(df_results0, df_scenarioResults); rm(join0)
- # df_impacts |> names() |> print()
- # initialResults$modelUnit_label |> unique() |> print()
- # df_impacts |> filter(modelUnit_label=="cm") |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- # (df_impacts |> filter(modelUnit_label=="cm"))$year |> range() |> print()
### Physical impacts = physScalar * scaled_impacts
### Annual impacts = phys-econ scalar value by the scaled impacts
- # df_impacts <- df_impacts |> mutate(hasPhysImpacts = 1 * !is.na(physicalmeasure))
- # df_impacts <- df_impacts |> mutate(hasPhysImpacts = 1 * !is.na(physScalar))
df_impacts <- df_impacts |> mutate(physical_impacts = scaled_impacts * physScalar)
df_impacts <- df_impacts |> mutate(annual_impacts = scaled_impacts * physEconScalar)
- # df_impacts <- df_impacts |> select(-c("hasPhysImpacts")) #|> as.data.frame()
###### Add Scenario Information ######
### Add in model info
- message("Formatting results", "...")
- df_impacts <- df_impacts |> filter(year>=minYear) |> rename(model_type = modelType)
- df_drivers <- df_drivers |> filter(year>=minYear) |> rename(model_type = modelType)
- df_results <- df_impacts |> left_join(df_drivers, by=c("year", "model_type"))
- rm("df_impacts")
- # (df_results |> filter(modelUnit_label=="cm"))$year |> range() |> print()
- # df_results |> filter(modelUnit_label=="cm") |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
+ paste0("Formatting results", "...") |> message()
+
+ # ###### ** Model Types ######
+ # ### Model types and models
+ rename0 <- c("model_label", "modelType_label", "modelUnitValue", "modelUnit_label", "modelUnitDesc")
+ rename1 <- c("model" , "model_type" , "driverValue" , "driverUnit", "driverType")
+ select0 <- rename0 |> c("modelType")
+ join0 <- c("modelType", "year")
+ drop0 <- c("model_type")
+ drop1 <- c("modelType")
+ # drop1 <- drop0 |> c("modelUnitValue")
+ ### Drop columns
+ # df_drivers |> glimpse(); df_impacts |> glimpse()
+ # df_drivers <- df_drivers |> select(all_of(select0))
+ df_impacts <- df_impacts |> select(-any_of(drop0))
+ ### Join values
+ df_results <- df_impacts |> left_join(df_drivers, by=c(join0))
+ rm(df_drivers, df_impacts)
+ ### Drop join column & rename columns
+ df_results <- df_results |> select(-any_of(drop1))
+ df_results <- df_results |> rename_at(c(rename0), ~rename1)
+ rm(rename0, rename1, select0, join0, drop0, drop1)
+ # df_results |> glimpse()
+
+
### Update inputs in outputs list
returnList[["results"]] <- df_results
###### Format Outputs ######
### Refactor sectors, variants, impactTypes
- co_variants <- co_variants |> mutate(sector_variant = paste(sector_id, variant_id, sep="_"))
- co_impactTypes <- co_impactTypes |> mutate(sector_impactType = paste(sector_id, impactType_id, sep="_"))
+ co_variants <- co_variants |> mutate(sector_variant = sector_id |> paste0("_", variant_id))
+ co_impactTypes <- co_impactTypes |> mutate(sector_impactType = sector_id |> paste0("_", impactType_id))
#### Rename Sector Columns
df_results <- df_results |> rename(sector_id = sector, sector = sector_label)
- #### Regions
+ #### Regions #wm should we do something similar for state?
# df_results |> names() |> print()
- reg_lvls <- co_regions$region_dot
- reg_lbls <- co_regions$region_label
+ reg_lvls <- co_regions[["region_dot"]]
+ reg_lbls <- co_regions[["region_label"]]
df_results <- df_results |> rename(region_id = region)
df_results <- df_results |> mutate(region = region_id |> factor(reg_lvls, reg_lbls))
- rm("reg_lvls", "reg_lbls")
-
- ### Model types and models
- modelCols0 <- c("model_label", "modelType_label", "modelUnitValue", "modelUnit_label", "modelUnitDesc")
- modelCols1 <- c("model" , "model_type" , "driverValue" , "driverUnit", "driverType")
- modelCols2 <- c("model_id", "model_dot", "model_underscore", "modelUnit_id")
- modelCols3 <- c("modelRefYear", "modelMaxOutput", "modelUnitScale", "modelMaxExtrap")
- df_results <- df_results |> select(-c("model_type"))
- df_results <- df_results |> rename_at(.vars=c(all_of(modelCols0)), ~modelCols1)
- df_results <- df_results |> select(-c(all_of(modelCols2), all_of(modelCols3)))
- rm("modelCols0", "modelCols1", "modelCols2", "modelCols3")
- # df_results |> names() |> print()
- # (df_results |> filter(driverUnit=="cm"))$year |> range() |> print()
+ rm(reg_lvls, reg_lbls)
+
+
### Variant labels
- var_lvls <- co_variants$sector_variant
- var_lbls <- co_variants$variant_label
- df_results <- df_results |> mutate(sect_var = sector_id |> paste(variant, sep="_"))
- df_results <- df_results |> mutate(variant = sect_var |> factor(var_lvls, var_lbls))
+ var_lvls <- co_variants[["sector_variant"]]
+ var_lbls <- co_variants[["variant_label"]]
+ df_results <- df_results |> mutate(sect_var = sector_id |> paste0("_", variant))
+ df_results <- df_results |> mutate(variant = sect_var |> factor(var_lvls, var_lbls))
df_results <- df_results |> select(-c("sect_var"))
rm("var_lvls", "var_lbls")
# (df_results |> filter(driverUnit=="cm"))$year |> range() |> print()
### Impact types
- imp_lvls <- co_impactTypes$sector_impactType
- imp_lbls <- co_impactTypes$impactType_label
- df_results <- df_results |> mutate(sect_imp = sector_id |> paste(impactType, sep="_"))
- df_results <- df_results |> mutate(impactType = sect_imp |> factor(imp_lvls, imp_lbls))
+ imp_lvls <- co_impactTypes[["sector_impactType"]]
+ imp_lbls <- co_impactTypes[["impactType_label"]]
+ df_results <- df_results |> mutate(sect_imp = sector_id |> paste0("_", impactType))
+ df_results <- df_results |> mutate(impactType = sect_imp |> factor(imp_lvls, imp_lbls))
df_results <- df_results |> select(-c("sect_imp", "sector_id"))
rm("imp_lvls", "imp_lbls")
# (df_results |> filter(driverUnit=="cm"))$year |> range() |> print()
###### Columns ######
- ### Scalar column names, Sector info names, Scenario names
- cGroupByCols0 <- c("sector", "variant", "impactYear", "impactType", "model_type", "model", "region")
- cAddCols0 <- c("sectorprimary", "includeaggregate")
- cSectorInfoNames <- c("modelUnitType", "c0", "c1", "exp0", "year0")
- cScenarioNames <- c("scenario_id")
- cScalarNames <- c("physScalarName", "physAdjName", "damageAdjName", "physScalar") |>
- c("physScalarValue", "physAdjValue" , "damageAdjValue") |>
- c("econScalarName" , "econMultiplierName" , "econScalar") |>
- c("econScalarValue", "econMultiplierValue", "econAdjValue", "econMultiplier") |>
- c("physEconScalar" , "scaled_impacts")
- selectCols0 <- c(cScenarioNames, cScalarNames, cSectorInfoNames)
+ ### Grouping columns
+ groupCols0 <- c("sector", "variant", "impactType", "impactYear", "region") |> c(stateCols0)
+ groupCols0 <- groupCols0 |> c("model_type", "model")
+ groupCols0 <- groupCols0 |> c("modelUnitType")
+ groupCols0 <- groupCols0 |> c("sectorprimary", "includeaggregate")
+ groupCols0 <- groupCols0 |> c("physicalmeasure")
+  groupCols0   <- groupCols0 |> (function(x){x[!(x %in% (x |> names()))]})() ### NOTE(review): names(x) is NULL for this unnamed vector, so this filter is a no-op — confirm intended comparison (names(df_results)?)
+ groupCols0 <- groupCols0 |> unique()
+ ### Driver columns
+ driverCols0 <- c("driverType", "driverUnit", "driverValue")
+ ### National & regional scenario columns
+ scenCols0 <- c("gdp_usd", "national_pop", "gdp_percap") |> c(popCol0)
+ ### Impact columns
+ impactCols0 <- c("physical_impacts", "annual_impacts")
+ ### Columns to select
+ select0 <- groupCols0 |> c(driverCols0, scenCols0) |> c("year") #|> c(impactCols0)
+ ### Relocate columns
+ # df_results |> names() |> print(); groupCols0 |> print(); select0 |> print()
+ df_results <- df_results |> relocate(all_of(select0))
+ # scenarioCol0 <- c("scenario_id")
+
+ ### Scalar columns
+ scalarCols0 <- c("physScalar", "physAdj", "damageAdj", "econScalar", "econAdj", "econMultiplier")
+ infoCols0 <- c("c0", "c1", "exp0", "year0")
+ suffix0 <- c("Name", "Value")
+ scalarCols0 <- scalarCols0 |> map(~.x |> paste0(c(suffix0))) |> unlist()
+ scalarCols0 <- scalarCols0 |> c("physScalar", "econScalar", "physEconScalar")
+ scalarCols0 <- scalarCols0 |> c("scaled_impacts")
+ scalarCols0 <- infoCols0 |> c(scalarCols0)
+ rm(suffix0, infoCols0)
+ ### Rearrange or drop scalar columns
+ # df_results |> names() |> print()
+ if(allCols){
+ df_results <- df_results |> relocate(any_of(scalarCols0), .after=all_of(select0))
+ } else{
+ df_results <- df_results |> select(-any_of(scalarCols0))
+ } ### End if(allCols)
+ # df_results |> names() |> print()
+ ### Other columns
+ # otherCols0 <- df_results |> names() |> (function(x){x[!(x %in% c(select0, scalarCols0))]})()
+ # df_results <- df_results |> relocate(all_of(otherCols0), .before=all_of(impactCols0))
+ otherCols0 <- df_results |> names() |> (function(x){x[!(x %in% c(select0, scalarCols0, impactCols0))]})()
+ df_results <- df_results |> select(-all_of(otherCols0))
### Convert to character and drop sector id
- df_results <- df_results |> mutate_at(.vars = c(all_of(cGroupByCols0)), as.character)
- df_results <- df_results |> filter(!is.na(sector))
+ df_results <- df_results |> mutate_at(c(groupCols0), as.character)
+ # df_results <- df_results |> filter(!(sector |> is.na()))
# (df_results |> filter(driverUnit=="cm"))$year |> range() |> print()
###### Testing ######
- if(!testing) {df_results <- df_results |> select(-c(all_of(selectCols0)))}
-
+ # if(allCols) {
+ # drop1 <- select0
+ # df_results <- df_results |> select(-all_of(drop1))
+ # } ### End if(!allCols)
+ # # df_results |> glimpse()
+ # # return(df_results)
+
+ ###### Primary Columns ######
+ if(doPrimary){
+ df_results <- df_results |> filter(sectorprimary ==1)
+ df_results <- df_results |> filter(includeaggregate==1)
+ } ### End if(doPrimary)
+ df_results <- df_results |> mutate_at(c("sectorprimary", "includeaggregate"), as.numeric)
###### Aggregation ######
- ### For regular use (i.e., not impactYears), simplify the data:
+ ### For regular use (i.e., not impactYears), simplify the data: groupCols0
if(requiresAgg){
- ### Aggregation types
- aggGroupByCols <- cGroupByCols0
- includeAggCol <- c("includeaggregate")
- if( doPrimary){df_results <- df_results |> filter(includeaggregate==1) |> select(-c(all_of(includeAggCol)))}
- if(!doPrimary){aggGroupByCols <- aggGroupByCols |> c(includeAggCol)}
- ### If the user specifies primary type, filter to primary types and variants and drop that column
- # df_results |> nrow() |> print(); df_results |> head() |> glimpse()
- df_results <- df_results |> as.data.frame() |> aggregate_impacts(aggLevels = aggLevels, groupByCols = aggGroupByCols)
- # df_results |> nrow() |> print(); df_results |> head() |> glimpse()
-
- rm("aggGroupByCols")
- }
+ # df_results <- df_results |> aggregate_impacts(aggLevels=aggLevels, groupByCols=groupCols0)
+ group0 <- groupCols0
+ # group0 <- select0 |> (function(x){x[!(x %in% driverCols0)]})()
+ # select0 |> print(); df_results |> names() |> print()
+ # group0 <- select0
+ df_results <- df_results |> aggregate_impacts(
+ aggLevels = aggLevels,
+ groupByCols = group0,
+ columns = impactCols0
+ ) ### End aggregate_impacts
+ } ### End if(requiresAgg)
# df_results |> names() |> print()
###### Order the Output ######
### Convert levels to character
### Order the rows, then order the columns
- if(!testing){
- resultNames <- df_results |> names()
- groupByCols <- c("sector", "variant", "impactYear", "impactType", "region", "model_type", "model", "year")
- driverCols <- c("driverValue", "driverUnit", "driverType")
- nonGroupCols <- resultNames[which(!(resultNames %in% c(groupByCols, driverCols)))]
- orderColIndex <- which(names(data) %in% groupByCols)
- selectCols <- c(groupByCols, driverCols, nonGroupCols) |> (function(x){x[x!="annual_impacts"] |> c("annual_impacts")})()
- ### Select columns
- df_results <- df_results |> select(c(all_of(selectCols))) |> arrange_at(.vars=c(all_of(groupByCols)))
- }
-
- # c_aggColumns <- c("sectorprimary", "includeaggregate") |> (function(y){y[which(y %in% names(df_results))]})()
- # if(length(c_aggColumns)>0){df_results <- df_results |> mutate_at(.vars=c(all_of(c_aggColumns)), as.numeric)}
+ # if(!allCols){
+ arrange0 <- groupCols0 |> c("year")
+ arrange0 <- arrange0 |> (function(x){x[x %in% names(df_results)]})()
+ ### Select columns
+ df_results <- df_results |> arrange_at(c(arrange0))
+ df_results <- df_results |> relocate(any_of(select0))
+ # df_results <- df_results |> relocate(any_of(otherCols0), .after=any_of(select0))
+ rm(arrange0)
+ # } ### End if(!allCols)
###### Format as Data Frame ######
- ### Format as data frame
### Update results in list
- df_results <- df_results |> ungroup() |> as.data.frame()
+ df_results <- df_results |> ungroup()
returnList[["results"]] <- df_results
### Which object to return
if(outputList) {returnObj <- returnList}
diff --git a/FrEDI/R/run_fredi_sv.R b/FrEDI/R/run_fredi_sv.R
index 9015e99e..893f2add 100644
--- a/FrEDI/R/run_fredi_sv.R
+++ b/FrEDI/R/run_fredi_sv.R
@@ -409,7 +409,7 @@ run_fredi_sv <- function(
### Otherwise use default scenario and add scenario column
msg2 |> message("Using default temperature scenario...")
# rDataList$co_defaultTemps |> names() |> print()
- driverInput <- rDataList$co_defaultTemps |>
+ driverInput <- rDataList$frediData$data$co_defaultTemps |>
mutate(temp_C = temp_C_global |> convertTemps(from="global")) |>
mutate(scenario="FrEDI Default")
### Select columns
@@ -423,7 +423,7 @@ run_fredi_sv <- function(
c_scenarios <- driverInput$scenario |> unique()
n_scenarios <- c_scenarios |> length()
### Ref year
- refYearTemp <- (rDataList$co_modelTypes |> filter(modelUnitType=="temperature"))$modelRefYear[1]
+ refYearTemp <- (rDataList$frediData$data$co_modelTypes |> filter(modelUnitType=="temperature"))$modelRefYear[1]
### Drivers
drivers_df <- c_scenarios |> lapply(function(
scenario_i, data_x = driverInput,
@@ -479,7 +479,7 @@ run_fredi_sv <- function(
c_scenarios <- driverInput$scenario |> unique()
n_scenarios <- c_scenarios |> length()
### Ref year
- refYearSLR <- (rDataList$co_modelTypes |> filter(modelUnitType=="slr"))$modelRefYear[1]
+ refYearSLR <- (rDataList$frediData$data$co_modelTypes |> filter(modelUnitType=="slr"))$modelRefYear[1]
### Drivers
drivers_df <- c_scenarios |> lapply(function(
scenario_i, data_x = driverInput,
@@ -663,7 +663,7 @@ run_fredi_sv <- function(
})
###### Format Results ######
### Bind results and ungroup
- df_results <- df_results (function(x){do.call(rbind, x)})()
+ df_results <- df_results |> (function(x){do.call(rbind, x)})()
df_results <- df_results |> ungroup() |> as.data.frame()
###### Save Results ######
diff --git a/FrEDI/R/sysdata.rda b/FrEDI/R/sysdata.rda
index f4545b37..417c8b45 100644
Binary files a/FrEDI/R/sysdata.rda and b/FrEDI/R/sysdata.rda differ
diff --git a/FrEDI/R/temps2slr.R b/FrEDI/R/temps2slr.R
index 6d7d9375..e80b48ef 100644
--- a/FrEDI/R/temps2slr.R
+++ b/FrEDI/R/temps2slr.R
@@ -86,17 +86,18 @@ temps2slr <- function(
###### Initial Values ######
### Reference year 2000 and equilibrium temperature offset for 2000
### Assign reference year from config file (max_year)
- temps2slr_constants <- fredi_config$temps2slr
- for(i in 1:length(temps2slr_constants)){
- assign(names(temps2slr_constants)[i], temps2slr_constants[[i]])
- }
+ fredi_config <- "fredi_config" |> get_frediDataObj("frediData")
+ temps2slr_constants <- fredi_config[["temps2slr"]]
+ names_slrConstants <- temps2slr_constants |> names()
+ for(name_i in names_slrConstants){assign(name_i, temps2slr_constants[[name_i]])}
#### Reference year is 2000
- ref_year0 <- rDataList[["co_modelTypes"]] |> filter(modelType_id == "slr") |> (function(x){x$modelRefYear[1]})()
+ co_modelTypes <- rDataList[["frediData"]][["data"]][["co_modelTypes"]] |> filter(modelType_id == "slr")
+ ref_year0 <- co_modelTypes$modelRefYear[1]
# year0 <- ref_year0
eqtemp_offset <- 0.62
###### Other constants ######
- mm2cm <- 0.1 ### Number of centimeters per millimeter
+ mm2cm <- 0.1 ### Number of centimeters per millimeter
# We used HadCrUT4 to determine the appropriate temperature offset between the actual temperature and the equilibrium temperature in 2000.
# Met Office Hadley Centre observations datasets. https://www.metoffice.gov.uk/hadobs/hadcrut4/data/current/download.html.
@@ -109,7 +110,7 @@ temps2slr <- function(
# ind_x <- 1:num_x
# ### Initialize Data
- # df_x0 <- data.frame(year = years, temp_C = temps) |>
+ # df_x0 <- tibble(year = years, temp_C = temps) |>
# filter(year >= year0) |>
# filter(year <= max_year)
#
@@ -127,7 +128,7 @@ temps2slr <- function(
###### Initialize Data ######
### Filter NA values and make sure values are numeric
- df_x0 <- data.frame(year = years, temp_C = temps) |>
+ df_x0 <- tibble(year = years, temp_C = temps) |>
mutate_at(c("year", "temp_C"), as.character) |>
mutate_at(c("year", "temp_C"), as.numeric) |>
filter(!is.na(year) & !is.na(temp_C)) |>
@@ -145,9 +146,7 @@ temps2slr <- function(
df_x0 <- df_x0 |>
group_by_at(.vars = c("year")) |>
summarize_at(c("temp_C"), mean, na.rm=T)
- }
-
-
+ } ### End if(hasDuplicates)
### Check for 2020 (year0)
checkRefYear <- (ref_year0 %in% df_x0$year)
@@ -155,22 +154,20 @@ temps2slr <- function(
if(!checkRefYear){
minYearInput0 <- df_x0$year |> min(na.rm=T)
maxYearInput0 <- df_x0$year |> max(na.rm=T)
- checkRefYear <- (minYearInput0 < ref_year0) & (maxYearInput0 > ref_year0)
+ checkRefYear <- (minYearInput0 < ref_year0) & (maxYearInput0 > ref_year0)
}
### If 2020 is still not found, message the user and exit
###To-do exit gracefully within tempbin()
+ ### Else, if there is a valid temperature series: Calculate temperatures
if(!checkRefYear) {
message("\t", "Warning:")
message("\t\t", "In 'temps2slr()': Missing values for the reference year ", ref_year0 , ".")
message("\t\t\t", "The reference year ", ref_year0 , " must be present in the input (or the input must have values above and below the reference year).")
-
message("\n", "\t", "Enter a valid temperature series.")
message("\n", "Exiting...")
return()
- }
- ### If there is a valid temperature series
- else{
+ } else{
year0 <- df_x0$year |> min(na.rm=T)
###### Standardize data #####
@@ -186,47 +183,44 @@ temps2slr <- function(
###### Interpolate the data #####
### Interpolated Data
# function require annual data
- df_x1 <-
- data.frame(year = new_years0, temp_C = NA) |>
- mutate(temp_C=approx(
- x = df_x0$year,
- y = df_x0$temp_C,
- xout = new_years0,
- rule = 2
- )$y) |>
- filter(year>=ref_year0) |>
- mutate(equilTemp = NA, slr_mm = NA) |>
- select(year, temp_C, equilTemp, slr_mm) |>
- mutate(yearFrom0 = year - ref_year0) |>
- mutate(phi = phi0 * exp(-yearFrom0 / tau2))
-
+ df_x1 <- tibble(year = new_years0, temp_C = NA)
+ df_x1 <- df_x1 |> mutate(temp_C=approx(
+ x = df_x0$year,
+ y = df_x0$temp_C,
+ xout = new_years0,
+ rule = 2
+ )$y)
+ df_x1 <- df_x1 |> filter(year>=ref_year0)
+ df_x1 <- df_x1 |> mutate(equilTemp = NA, slr_mm = NA)
+ df_x1 <- df_x1 |> select(c("year", "temp_C", "equilTemp", "slr_mm"))
+ df_x1 <- df_x1 |> mutate(yearFrom0 = year - ref_year0)
+ df_x1 <- df_x1 |> mutate(phi = phi0 * exp(-yearFrom0 / tau2))
###### Series ######
### Calculate base values
- df_x <- df_x1 |>
- ### Equilibrium temps
- (function(k){
- for(i in ind_x){
- if(i == 1){
- ### Initialize temperature
- temp_C0 <- (df_x1 |> filter(year==ref_year0))$temp_C[1]
- k$equilTemp[i] <- temp_C0 - eqtemp_offset
- k$slr_mm[i] <- 0
- } else{
- k$equilTemp[i] <- k$equilTemp[i - 1] + ( k$temp_C[i] - k$equilTemp[i-1] ) / tau1
- k$slr_mm[i] <- k$slr_mm[i-1] + alpha * ( k$temp_C[i] - k$equilTemp[i] ) + k$phi[i]
- }
- }
- return(k)
- })() |>
-
- ### GMSL in cm
- select(year, slr_mm) |>
- mutate(slr_cm = slr_mm * mm2cm) |> # convert from mm to cm
- select(-slr_mm)
-
+ ### Equilibrium temps
+ df_x <- df_x1 |> (function(k){
+ for(i in ind_x){
+ if(i == 1){
+ ### Initialize temperature
+ temp_C0 <- (df_x1 |> filter(year==ref_year0))$temp_C[1]
+ k$equilTemp[i] <- temp_C0 - eqtemp_offset
+ k$slr_mm [i] <- 0
+ } else{
+ k$equilTemp[i] <- k$equilTemp[i - 1] + ( k$temp_C[i] - k$equilTemp[i-1] ) / tau1
+ k$slr_mm [i] <- k$slr_mm[i-1] + alpha * ( k$temp_C[i] - k$equilTemp[i] ) + k$phi[i]
+ } ### End else
+ } ### End for(i in ind_x)
+ return(k)
+ })()
+
+ ### GMSL in cm
+ df_x <- df_x |> select(c("year", "slr_mm"))
+ df_x <- df_x |> mutate(slr_cm = slr_mm * mm2cm) ### convert from mm to cm
+ df_x <- df_x |> select(-c("slr_mm"))
+ ### Return
return(df_x)
- }
+ } ### End else
}
diff --git a/FrEDI/R/testing_general_fredi_test.R b/FrEDI/R/testing_general_fredi_test.R
index 33448ea6..4446c0d9 100644
--- a/FrEDI/R/testing_general_fredi_test.R
+++ b/FrEDI/R/testing_general_fredi_test.R
@@ -20,7 +20,44 @@ general_fredi_test <- function(
### Names of objects to save
c_genTest0 <- "dataInfo_test"
c_diff0 <- "outputs_diffs"
-
+ c_sectorNA0 <- "sector_NA"
+
+ #### Sector NA Check ####
+ #### Check if each sector has at least one non-NA-value
+
+ # Get names of sector "group-names" consistent with dplyr
+ sect_names <- newOutputs |>
+ group_by(sector) |>
+ group_keys()
+
+ # For each sector group, check if each sector has nonNA values
+ sect_na_check <- newOutputs |>
+ group_by(sector) %>%
+ group_map(
+ ~ has_nonNA_values_df(.)
+ ## add check for annual impacts
+ ) %>%
+ set_names(sect_names$sector)
+
+ # If there are any sector groups with all NA values, filter to that sector and save to output list
+
+  # sect_na_check$`ATS Extreme Temperature` <- 1 ### debug-only override (forces this sector to fail the NA check); do not commit
+
+ if(any(sect_na_check >0)){
+
+ sectorNA_names <- which(sect_na_check >0) |>
+ names()
+ sectorNA <- newOutputs |>
+ filter(
+ sector %in% sectorNA_names )
+ save_list[[c_sectorNA0]] <- sectorNA
+
+ } else {
+
+ save_list[[c_sectorNA0]] <- tibble("All Sectors have some non-missing and non-zero values")
+ }
+
+
###### Load Reference Data ######
### Load previous Default Data to compare to new outputs
### 0. Names for reference data
diff --git a/FrEDI/R/utils.R b/FrEDI/R/utils.R
index 3778b3ed..a3106d65 100644
--- a/FrEDI/R/utils.R
+++ b/FrEDI/R/utils.R
@@ -4,7 +4,7 @@ get_vector <- function(
data, column = NULL
){
### Select column and get values as vector
- col0 <- ifelse(is.null(column), c(), column)
+    col0  <- if (column |> is.null()) c() else column ### ifelse() errors with zero-length `yes`; scalar condition wants if/else
vals0 <- data[[column]] |> as.vector()
### Return
return(vals0)
@@ -37,6 +37,42 @@ get_msgPrefix <- function(level=1){
return(msg_x)
}
+### This function makes it easier to get data objects from the sysdata.rda file
+get_frediDataObj <- function(
+ x = NULL, ### Object name
+ listSub = "frediData", ### Sublist name
+ listall = FALSE,
+ listName = "rDataList",
+ pkg = "FrEDI",
+ lib.loc = .libPaths()[1] ### Library path to look for packages
+){
+ ### Messaging
+ msg0 <- "\t"
+ ### Check if list name exists
+ exists0 <- listName |> exists()
+ ### If the listname exists in the name space, parse it
+ ### Otherwise, grab it from a package name space
+ if(exists0){new_x <- parse(text=listName) |> eval()}
+ else {
+ ### Check if package & list name
+ pkgList0 <- lib.loc |> installed.packages()
+ pkgExists0 <- pkg %in% pkgList0
+ if(!pkgExists0){
+ msg0 |> paste0("Package doesn't exist...") |> message()
+ msg0 |> paste0("Exiting...") |> message()
+ return()
+ } ### End if(!pkgExists0)
+ else {new_x <- getFromNamespace(listName, ns=pkg)}
+ } ### End else(exists0)
+
+ ### Whether to list all items in data object or not
+ # if(listall) {return_x <- new_x |> names()}
+ # else {return_x <- new_x[[x]]}
+ if(listall) {return_x <- new_x |> names()}
+ else {return_x <- new_x[[listSub]][["data"]][[x]]}
+ ### Return
+ return(return_x)
+} ### End get_frediDataObj
###### interpolate_annual ######
### Created 2021.02.05. Updated 2021.02.05
@@ -48,189 +84,707 @@ interpolate_annual <- function(
column = NULL, ### Column to create results for
rule = NULL, ### for interpolation,
method = NULL, ### method for interpolation; default=linear
- region = "National.Total" ### Region if "addRegion"
+ region = "National.Total", ### Region if "addRegion"
+ byState = FALSE ### If breakdown by state
){
###### Data Info ######
+ ##### Other values
+ region0 <- region
+ rm(region)
+ ##### By state
+ if (byState) {stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
##### Columns
dataCols <- data |> names()
- defCols <- c("year", "region")
+ defCols <- c("year", "region") |> c(stateCols0)
defCol0 <- dataCols[!(dataCols %in% defCols)][1]
- column0 <- ifelse(is.null(column), defCol0, column)
+ column0 <- column |> is.null() |> ifelse(defCol0, column)
othCols <- dataCols[!(dataCols %in% c(defCols, column0))]
- rm("defCol0")
+ # defCol0 |> print(); column0 |> print()
+ rm(defCol0)
+
###### Format data
- data <- data |> filter(!is.na(column0))
- values0 <- data |> get_vector(column0)
- years0 <- data |> get_vector(defCols[1])
+ # column0 |> print()
+ data <- data |> filter(!(column0 |> is.na()))
+ values0 <- data[[column0]]
+ years0 <- data[["year" ]]
+
### Interpolation years
- doYears <- is.null(years)
+ doYears <- years |> is.null()
if(doYears){
- years <- years0 |> range(na.rm=TRUE);
+ years <- years0 |> range(na.rm=TRUE)
years <- years[1]:years[2]
- }; rm("doYears")
+ } ### End if(doYears)
+ rm(doYears)
+
##### Regions
- regions0 <- region
addRegion <- !("region" %in% dataCols)
- if(addRegion) {data <- data |> mutate(region = regions0[1])}
- else {regions0 <- data |> get_uniqueValues("region")}
- rm("addRegion")
+ if (addRegion) {data <- data |> mutate(region = region0)}
+ rm(addRegion)
###### Interpolation Info ######
### - Return NA values outside of extremes
### - If only one value is provided use the same for left and right
### - Interpolation method
- nullRule <- is.null(rule)
- repRule <- length(rule) < 2
+ nullRule <- rule |> is.null()
+ repRule <- rule |> length() < 2
defRule <- c(1)
if(nullRule){rule <- defRule}
if(repRule ){rule <- rule |> rep(2)}
- method <- ifelse(is.null(method), "linear", method)
+ method <- method |> is.null() |> ifelse("linear", method)
- ###### Interpolate missing values for each region ######
+ ###### Interpolate NA values ######
### Filter to the region and then interpolate missing values
- cols0 <- c("x", "y");
- cols1 <- c("year", column0)
- df_interp <- regions0 |> lapply(function(region_i){
- ### Values
- which_i <- (data$region==region_i) |> which()
- x_i <- years0[which_i]
- y_i <- values0[which_i]
- ### Approximate
- new_i <- approx(x = x_i, y = y_i, xout = years, rule = rule, method = method)
- new_i <- new_i |> as.data.frame()
- new_i <- new_i |> rename_at(.vars=c(all_of(cols0)), ~cols1)
- new_i <- new_i |> mutate(region = region_i)
- ### Return
- return(new_i)
- }) |> (function(i){do.call(rbind, i)})()
+ cols0 <- c("x", "y")
+ cols1 <- c("year") |> c(column0)
+ ### Iterate over states if byState=T
+ ### Otherwise, iterate over regions
+ if (byState) {
+ states0 <- data |> get_uniqueValues("state" )
+ df_interp <- states0 |> map(function(state_i){
+ ### Values
+ df_i <- data |> filter(state==state_i)
+ x_i <- df_i[["year" ]]
+ y_i <- df_i[[column0]]
+ ### Approximate
+ new_i <- approx(x=x_i, y=y_i, xout=years, rule=rule, method=method)
+ new_i <- new_i |> as_tibble()
+ new_i <- new_i |> rename_at(c(cols0), ~cols1)
+ new_i <- new_i |> mutate(state=state_i)
+ # new_i |> names() |> print()
+ ### Return
+ return(new_i)
+ }) |> bind_rows()
+  } else { ### By region (byState == FALSE)
+ regions0 <- data |> get_uniqueValues("region")
+ df_interp <- regions0 |> map(function(region_i){
+ ### Values
+ df_i <- data |> filter(region==region_i)
+ x_i <- df_i[["year" ]]
+ y_i <- df_i[[column0]]
+ ### Approximate
+ new_i <- approx(x=x_i, y=y_i, xout=years, rule=rule, method=method)
+ new_i <- new_i |> as_tibble()
+ new_i <- new_i |> rename_at(c(cols0), ~cols1)
+ new_i <- new_i |> mutate(region = region_i)
+ # new_i |> names() |> print()
+ ### Return
+ return(new_i)
+ }) |> bind_rows()
+ } ### End else
# df_interp |> names() |> print()
- # new_i <- new_i |> select(c(all_of(dataCols)))
- # new_i <- new_i |> select(c(all_of(defCols), all_of(column0)))
+
+ ### Drop yCol from data
+ # data |> glimpse(); df_interp |> glimpse(); cols1 |> print()
+ data <- data |> select(-all_of(cols1))
+ ### Join with original data:
+ names0 <- data |> names()
+ names1 <- df_interp |> names()
+ join0 <- names0[names0 %in% names1]
+ doJoin <- (names0 |> length() > 0) & (join0 |> length() > 0)
+ ### Group data
+ data <- data |>
+ group_by_at(c(names0)) |>
+ summarize(n=n(), .groups="keep") |> ungroup() |>
+ select(-c("n"))
+
+ ### Do join
+ if(doJoin){
+ # df_interp |> glimpse(); data |> glimpse()
+ data <- data |> left_join(df_interp, by=c(join0))
+ } else{
+ data <- df_interp
+ } ### End else
+
+ ### Arrange data
+ arrange0 <- c(join0, "year")
+ data <- data |> arrange_at(c(arrange0))
+
### Return
- return(df_interp)
+ return(data)
} ### End function
+###### get_scenario_id ######
+### Function to standardize the creation of the scenario_id
+get_scenario_id <- function(
+ data_x,
+ include=c("region_dot", "model_dot") ### Character vector of column names to include
+){
+ ### Other vals
+ mnl0 <- "\n" ### Message new line
+ msg0 <- "\t" ### Message indent level 0
+ mcom <- ", " ### Comma for collapsing lists
+ mqu0 <- "\'" ### Message quote
+ mqu1 <- mqu0 |> paste0(mcom, mqu0, collapse="")
+ mend0 <- "..."
+ ### Columns to include
+ main0 <- c("sector", "variant", "impactType", "impactYear")
+ cols0 <- main0 |> c(include)
+ ### Check names
+ names0 <- data_x |> names()
+ cCheck <- (cols0 %in% names0)
+ nCheck <- (!cCheck) |> which() |> length()
+ if(nCheck){
+ paste0("In get_scenario_id:") |> message()
+ msg0 |> paste0("Data is missing columns ", mqu0, cols0[!cCheck] |> paste(collapse=mqu1), mqu0, mend0) |> message()
+ paste0("Creating `scenario_id` from columns ", mqu0, cols0[cCheck] |> paste(collapse=mqu1), mqu0, mend0) |> message()
+ ### New names
+ cols0 <- cols0[cCheck]
+ } ### End if(nCheck)
+ ### Subset data
+ scen_x <- data_x[,cols0]
+ ### Get scenario IDs
+ scen_x <- scen_x |> apply(1, function(x){x |> as.vector() |> paste(collapse ="_")}) |> unlist()
+ data_x <- data_x |> mutate(scenario_id = scen_x)
+ ### Return
+ return(data_x)
+}
+
+###### summarize_seScenario ######
+### Summarize regional population
+summarize_seScenario <- function(
+ df0, ### Dataframe with population & national values like gdp_usd
+ national = TRUE ### If false, summarizes to regional level; otherwise, does national
+){
+ ### By state
+ byState <- "state" %in% (df0 |> names())
+ if( byState ){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+ if(!national){
+ popCol0 <- byState |> ifelse("state_pop", "reg_pop")
+ regCols0 <- c("region") |> c(stateCols0)
+ } else{
+ # "got here" |> print()
+ popCol0 <- c()
+ regCols0 <- c()
+ } ### End else
+
+ ### Regional column names
+ # regCols0 <- regCol0 |> c(stateCols0)
+ # regCols0 |> print()
+ ### Columns to group by
+ group0 <- regCols0 |> c("gdp_usd", "national_pop", "gdp_percap")
+ group0 <- group0 |> c("byState", "year")
+ ### Columns to drop
+ sumCols0 <- popCol0 |> c("n")
+ ### Add column count
+ df0 <- df0 |> mutate(n=1)
+
+ ### Mutate data
+ if(byState){
+ ### Add state column
+ df0 <- df0 |> mutate(byState = 1)
+ ### Bind a copy of the data
+ df0 <- df0 |>
+ mutate_at(c(stateCols0), function(y){"N/A"}) |>
+ mutate(byState = 0) |>
+ rbind(df0)
+ ### Summarize
+ df0 <- df0 |> group_by_at(c(group0)) |> summarize_at(c(sumCols0), sum, na.rm=T) |> ungroup()
+ # df0 |> glimpse()
+ # rm(group0)
+ # } ### End if(byState)
+ } else{
+ ### Add state column
+ df0 <- df0 |> mutate(byState = 0)
+ } ### End if(byState)
+
+ ### Drop count column
+ drop0 <- c("n")
+ df0 <- df0 |> select(-all_of(drop0))
+
+ ### Return
+ return(df0)
+}
+
+
+###### update_popScalars ######
+### Update scalars with regional population scenario
+update_popScalars <- function(
+ df_scalars, ### Tibble of scalars
+ df_pop, ### Socioeconomic scenario
+ groupCols = c("region") |> c("state", "postal") |> c("year"),
+ popCol = c("state_pop")
+){
+ ### Drop region population
+ df_scalars <- df_scalars |> filter(scalarName!="reg_pop")
+ # df_scalars <- df_scalars |> mutate(slrSector="N/A")
+
+ ### Format population data
+ drop0 <- c("gdp_usd", "national_pop", "gdp_percap")
+ df_pop <- df_pop |> summarize_seScenario(national=FALSE)
+ df_pop <- df_pop |> select(-any_of(drop0))
+
+ ### Add additional scalar attributes
+ df_pop <- df_pop |> mutate(scalarName = "reg_pop")
+ df_pop <- df_pop |> mutate(scalarType = "physScalar")
+ df_pop <- df_pop |> mutate(national_or_regional = "regional")
+ ### Rename column
+ df_pop <- df_pop |> rename_at(c(popCol), ~"value")
+
+ ### Bind values to scalars
+ # df_scalars |> glimpse(); df_pop |> glimpse()
+ df_scalars <- df_scalars |> rbind(df_pop)
+ # df_scalars |> glimpse();
+ ### Return
+ return(df_scalars)
+}
+###### extend_slrScalars ######
+### Scalars for SLR past 2090
+extend_slrScalars <- function(
+ df0, ### Dataframe of initial results
+ df_se, ### Socioeconomic scenario
+ df_scalars, ### Main scalar values: df_mainScalars
+ elasticity = NULL
+){
+ ###### Elasticity ######
+ ### Update exponent
+ if(!(elasticity |> is.null())){
+ # df_scalars <- df_scalars |> mutate(exp0 = (exp0 == 1) |> ifelse(exp0, elasticity))
+ df_scalars <- df_scalars |> mutate(exp0 = (econScalarName=="vsl_usd") |> ifelse(elasticity, exp0))
+ } ### End if(!(elasticity |> is.null()))
+
+ ###### By State ######
+ ### By state
+ byState <- TRUE
+ stateCols0 <- c("state", "postal")
+ popCol0 <- byState |> ifelse("state_pop", "reg_pop")
+
+ ###### Ref Year ######
+ ### Filter to years >= refYear (2090)
+ df_info <- "co_slrScalars" |> get_frediDataObj("frediData")
+ refYear0 <- (df_info[["refYear"]] |> unique())[1]
+ df_scalars <- df_scalars |> filter(year >= refYear0)
+ df_se <- df_se |> filter(year >= refYear0)
+ df0 <- df0 |> filter(year >= refYear0)
+ # df_se <- df_se |> filter(year >= refYear0)
+
+ ### Drop columns from data
+ drop0 <- c("physScalar", "physAdj", "damageAdj", "econScalar", "econMultiplier", "econAdj")
+ drop1 <- drop0 |> paste0("Name") |> c(drop0 |> paste0("Value"))
+ drop1 <- drop1 |> c("physScalar", "econScalar", "econMultiplier", "physEconScalar")
+ drop1 <- drop1 |> c("c1", "exp0")
+ df0 <- df0 |> select(-all_of(drop1))
+ rm(drop0, drop1)
+
+ ###### Join Data & Scalar Info ######
+ ### Join initial results & scalar info
+ drop0 <- c("byState")
+ join0 <- c("sector")
+ df_info <- df_info |> select(-all_of(drop0))
+ df0 <- df0 |> left_join(df_info, by=c(join0))
+ rm(drop0, join0)
+
+ ###### Set Default Columns ######
+ ### Make some columns none and 1
+ mutate0 <- c("damageAdj", "econScalar") |> paste0("Name")
+ mutate1 <- c("damageAdj", "econScalar") |> paste0("Value")
+ df0[,mutate0] <- "none"
+ df0[,mutate1] <- 1
+ rm(mutate0, mutate1)
+
+ ###### Get Multiplier Values ######
+ ### Gather econMultiplier scalars
+ gather0 <- c("gdp_usd", "gdp_percap")
+ select0 <- gather0 |> c("year")
+ # select0 <- gather0 |> c("byState", "year")
+ df_mult0 <- df_se |> summarize_seScenario(national=T)
+ df_mult0 <- df_mult0 |> filter(byState==0)
+ df_mult0 <- df_mult0 |> select(all_of(select0))
+ df_mult0 <- df_mult0 |> pivot_longer(
+ cols = all_of(gather0),
+ names_to = "econMultiplierName",
+ values_to = "econMultiplierValue"
+ ) ### End pivot_longer()
+
+ ### Drop values
+ rm(gather0, select0)
+
+ ### Join with data
+ join0 <- c("econMultiplierName", "year")
+ df0 <- df0 |> left_join(df_mult0, by=c(join0))
+ # df0 <- df0 |> mutate(econAdjName = econMultiplierName)
+ rm(join0)
+
+ ###### Economic Adjustment Values ######
+ ### Get adjustment values & join with data
+ rename0 <- c("econMultiplierValue", "year")
+ rename1 <- c("econAdjValue", "refYear")
+ join0 <- c("econMultiplierName", "refYear") #|> c("byState")
+ select0 <- join0 |> c("econAdjValue")
+ df_adj0 <- df_mult0 |> filter(year == refYear0)
+ df_adj0 <- df_adj0 |> rename_at(c(rename0), ~rename1)
+ # df0 |> glimpse(); df_adj0 |> glimpse()
+ df_adj0 <- df_adj0 |> select(all_of(select0))
+ df0 <- df0 |> left_join(df_adj0, by=c(join0))
+ ### Drop values
+ rm(rename0, rename1, select0, join0, df_mult0, df_adj0)
+ # "got here1" |> print()
+ ###### Economic Multiplier & Scalar ######
+ ### Calculate econ scalar values
+ df0 <- df0 |> mutate(econMultiplier = c1 * (econMultiplierValue / econAdjValue)**exp0)
+ df0 <- df0 |> mutate(econScalar = econScalarValue * econMultiplier)
+
+ ###### Physical Scalar Values ######
+ ###### Get Multiplier Values ######
+ ### Gather econMultiplier scalars
+ type0 <- "physScalar"
+ rename0 <- c("scalarName", "value")
+ rename1 <- c("physScalarName", "physScalarValue")
+ join0 <- c("physScalarName", "year") |> c("byState", "region") |> c(stateCols0)
+ # select0 <- rename1 |> c("year")
+ select0 <- rename1 |> c("year") |> c("byState", "region") |> c(stateCols0)
+ vals0 <- df0[["physScalarName"]] |> unique()
+ ### Filter, rename, select, join
+ df_phys0 <- df_scalars |> filter(scalarType == type0)
+ df_phys0 <- df_phys0 |> filter(scalarName %in% vals0)
+ df_phys0 <- df_phys0 |> rename_at(c(rename0), ~rename1)
+ df_phys0 <- df_phys0 |> select(all_of(select0))
+ # df0 |> glimpse(); df_phys0 |> glimpse()
+ df0 <- df0 |> left_join(df_phys0, by=c(join0))
+ ### Drop values
+ rm(type0, rename0, rename1, join0, select0, vals0)
+
+ ###### Physical Adjustment Values ######
+ ### Get adjustment values & join with data
+ rename0 <- c("physScalarName", "physScalarValue", "year")
+ rename1 <- c("physAdjName", "physAdjValue", "refYear")
+ join0 <- c("physAdjName", "refYear") |> c("byState", "region") |> c(stateCols0)
+ select0 <- rename1 |> c("refYear") |> c("byState", "region") |> c(stateCols0)
+ df_adj0 <- df_phys0 |> filter(year == refYear0)
+ df_adj0 <- df_adj0 |> rename_at(c(rename0), ~rename1)
+ df_adj0 <- df_adj0 |> select(all_of(select0))
+ df0 <- df0 |> mutate(physAdjName = physScalarName)
+ # df0 |> glimpse(); df_adj0 |> glimpse()
+ df0 <- df0 |> left_join(df_adj0, by=c(join0))
+ ### Drop values
+ rm(rename0, rename1, select0, join0, df_phys0, df_adj0)
+
+ ###### Physical Scalar & Phys Econ Scalar ######
+  ### Calculate physical scalar & combined physical-economic scalar values
+ df0 <- df0 |> mutate(physScalar = c2 * physScalarValue / physAdjValue)
+ df0 <- df0 |> mutate(physEconScalar = econScalar * physScalar)
+
+ ### Drop columns & join
+ drop0 <- c("c2", "refYear")
+ df0 <- df0 |> filter(year > refYear)
+ df0 <- df0 |> select(-all_of(drop0))
+ rm(drop0)
+
+ ###### Return ######
+ ### Return
+ return(df0)
+}
###### match_scalarValues ######
-### Last updated 2021.02.05
+### Last updated 2023.11.15
### Match Scalar Values
### This function matches interpolated scalar values for each scalar type to the time series scenario information
### Scalar types are: physAdj, physMultiplier, damageAdj, econScalar, econMultiplier
### Function "match_scalarValues" replaces "get_popWts", "get_physMultipliers", and "get_econScalars"
match_scalarValues <- function(
- data, ### Initial results dataframe
+ df0, ### Initial results dataframe
scalars, ### Scalars dataframe
scalarType
){
- ### Scalar columns to rename
- newColNames <- scalarType |> paste0(c("Name", "Value"))
- renameCols <- "scalarName" |> c("value")
- ### Scalar identifier column
- scalarColName <- newColNames[1]
- scalarValName <- newColNames[2]
- ### Rename the scalar identifier column to match that of the data
- scalarNames_1 <- scalars |> names()
- names(scalars)[which(scalarNames_1 == renameCols[1])] <- scalarColName
-
- ###### Get scalars of particular type ######
- scalars <- scalars |> filter(scalarType==scalarType)
-
- ###### Separate scalar info into national and regional ######
- scalars_regional <- scalars |> filter(national_or_regional == "regional")
- scalars_national <- scalars |> filter(national_or_regional == "national")
+ ### Check if scalars are state-level
+ byState <- "state" %in% colnames(df0)
+ if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
- ###### ScalarName == "None" ######
- ### Filter the data to those for which the scalar identifier == "none"...value = 1
- df_none <- data |> filter(data[,scalarColName] == "none") |> mutate(value = 1)
+ ###### Filter to Scalar Type ######
+ # scalarType |> print()
+ scalarType0 <- scalarType; rm(scalarType)
+ scalars <- scalars |> filter(scalarType==scalarType0)
+ scalars <- scalars |> select(-c("scalarType"))
+ # "got here1" |> print(); scalars |> glimpse()
- ###### Regional values ######
- scalars_regional <- scalars |> filter(national_or_regional == "regional")
- scalarNames_reg <- scalars_regional[,scalarColName] |> unique()
- df_regional <- data |>
- filter(!(data[,scalarColName] == "none") & data[,scalarColName] %in% scalarNames_reg) |>
- left_join(scalars_regional, by=c("year", "region", scalarColName)) |>
- select(-c("scalarType", "national_or_regional"))
+ ### Scalar columns to rename
+ rename0 <- "scalarName"
+ rename1 <- scalarType0 |> paste0(c("Name"))
+ scalarColName <- rename1
+ scalars <- scalars |> rename_at(c(rename0), ~rename1)
+
+ ###### National vs Regional Scalars ######
+ # scalars$national_or_regional |> unique() |> print()
+ scalars_regional <- scalars |> filter(national_or_regional != "national")
+ scalars_national <- scalars |> filter(national_or_regional == "national")
+ ### Drop columns
+ scalars_regional <- scalars_regional |> select(-c("national_or_regional"))
+ scalars_national <- scalars_national |> select(-c("national_or_regional"))
+ # scalars_national |> glimpse()
+
+ ### Scalar names
+ scalarNames_reg <- scalars_regional[[scalarColName]] |> unique()
+ scalarNames_nat <- scalars_national[[scalarColName]] |> unique()
+ # "got here2" |> print(); scalarNames_nat |> print()
+
+ ###### Create Filters ######
+ ### Filter the df0 to those for which the scalar identifier == "none"...value = 1
+ filter_none <- df0[[scalarColName]] == "none"
+ filter_reg <- df0[[scalarColName]] %in% scalarNames_reg
+ filter_nat <- df0[[scalarColName]] %in% scalarNames_nat
+
+ ###### Filter Data ######
+ df_none <- df0[filter_none,] #|> mutate(value = 1)
+ df_regional <- df0[filter_reg,]
+ df_national <- df0[filter_nat,]
+ ### Whether filtered data has rows
+ has_national <- df_national |> nrow()
+ has_regional <- df_regional |> nrow()
+ # scalars |> glimpse()
+
+ ###### Select Columns ######
+ ### Columns
+ select0 <- c("sector", "sector_label", "modelType")
+ select0 <- select0 |> c("variant", "impactType", "impactYear")
+ select0 <- select0 |> c("region") |> c(stateCols0)
+ select0 <- select0 |> c("sectorprimary", "includeaggregate")
+ select0 <- select0 |> c("byState", "year")
+ select0 <- select0 |> c(scalarType0 |> paste0(c("Name"))) #|> c("value")
+ ### Select
+ df_none <- df_none |> select(all_of(select0)) |> mutate(value=1)
+ df_regional <- df_regional |> select(all_of(select0))
+ df_national <- df_national |> select(all_of(select0))
+
+ ###### Mutate Data ######
+ # scalars_regional |> glimpse(); df_regional |> glimpse();
+ ### Join & drop
+ if(has_regional){
+ join0 <- c("region") |> c(stateCols0) |> c(scalarColName) |> c("byState", "year")
+ df_regional <- df_regional |> left_join(scalars_regional, by=c(join0))
+ rm(join0)
+ } ### End if(has_regional)
###### National values ######
- scalars_national <- scalars |> filter(national_or_regional == "national") |> select(-region)
- scalarNames_nat <- scalars_national[,scalarColName] |> unique()
- df_national <- data |>
- filter(!(data[,scalarColName] == "none") & data[,scalarColName] %in% scalarNames_nat) |>
- left_join(scalars_national, by=c("year", scalarColName)) |>
- select(-c("scalarType", "national_or_regional"))
-
-
- ###### Rename value column ######
- df_x <- rbind(df_none, df_regional, df_national)
- names_x <- df_x |> names()
- names(df_x)[which(names_x == renameCols[2])] <- scalarValName
+ # scalars_national |> glimpse(); df_national |> glimpse();
+ ### Join & drop
+ if(has_national){
+ # join0 <- c(scalarColName) |> c("byState", "year")
+ # drop0 <- c("region") |> c(stateCols0)
+ join0 <- c(scalarColName) |> c("year")
+ drop0 <- c("region") |> c(stateCols0) |> c("byState")
+ scalars_national <- scalars_national |> select(-all_of(drop0))
+ df_national <- df_national |> left_join(scalars_national, by=c(join0))
+ rm(join0, drop0)
+ } ### End if(has_national)
+ # df_national1 |> glimpse()
+
+ ###### Rename ######
+ # df_none |> glimpse(); df_regional |> glimpse()
+ data_x <- df_none |> rbind(df_regional)
+ data_x <- data_x |> rbind(df_national)
+ rm(df_none, df_regional, df_national)
+
+ ### Add placeholder column
+ hasData0 <- data_x |> nrow()
+ # data_x |> glimpse()
+ if(hasData0){
+ ### Replace NA values ?
+ # data_x <- data_x |> mutate(value = value |> replace_na(1))
+ data_x <- data_x |> mutate(value = value)
+ } else{
+    data_x <- data_x |> mutate(value=1)
+ } ### End if(hasData0)
+
+ ### Rename
+ rename0 <- "value"
+ rename1 <- scalarType0 |> paste0(c("Value"))
+ # "aloha" |> print(); data_x |> names() |> print()
+ data_x <- data_x |> rename_at(c(rename0), ~rename1)
+ ### Join
+ # exCols0 <- scalarType0 |> paste0(c("Name", "Value")) |> c("value")
+ # join0 <- select0 |> (function(x){x[!(x %in% exCols0)]})()
+ join0 <- select0
+ df0 <- df0 |> left_join(data_x, by=c(join0))
- ###### Return results values ######
- return(df_x)
+ ###### Return ######
+ return(df0)
}
-
-
###### get_econAdjValues ######
-### Last updated 2021.02.05
+### Last updated 2023.11.30
### Get Economic Adjustment/Multiplier
### This function matches interpolated scalar values for each scalar type to the time series scenario information
### Scalar types are: physAdj, physMultiplier, damageAdj, econScalar, econMultiplier
### Function "get_econAdjValues" replaces "get_econMultipliers"
get_econAdjValues <- function(
- data, ### Initial results dataframe
- scenario, ### Population and GDP scenario
+ data, ### Initial results dataframe
+ scenario, ### Population and GDP scenario
multipliers ### List of multipliers
){
- # data |> names() |> print();
###### Multipliers
- multiplier0 <- "none"
- multipliers <- multipliers[multipliers!=multiplier0]
+ none0 <- "none"
+ multipliers <- multipliers |> (function(x){x[!(x %in% none0)]})()
###### Scenario information
- cRegions <- scenario$region |> unique()
- cNames <- scenario |> names(); cNames <- cNames[cNames %in% multipliers]
+ # Get column names:
+ cNames <- scenario |> names()
+ cNames <- cNames |> (function(x){x[(x %in% multipliers)]})()
+ ###### By state
+ # Check if data is broken down by state:
+  byState     <- "state" %in% (scenario |> names())
+ # group0 <- c("gdp_usd", "national_pop", "gdp_percap")
+ idCols0 <- c("byState", "year")
+ select0 <- cNames |> c(idCols0)
+ scalars <- scenario |> summarize_seScenario(national=T)
+ scalars <- scalars |> select(all_of(select0))
###### Format scalar data
###### Get values for a single region since the multipliers are the same for all regions
###### Gather scenario information
- scalars <- scenario |> filter(region==cRegions[1])
- scalars <- scalars |> select(c("year", all_of(cNames)))
- scalars <- scalars |> gather(key="econMultiplierName", value="econMultiplierValue", -c("year"))
- # data |> names() |> print(); scalars |> names() |> print()
- ###### Multiplier Adjustment
- ### Rename scalars and convert year to character
- cols0 <- c("year" , "econMultiplierName", "econMultiplierValue")
- cols1 <- c("year0", "econAdjName" , "econAdjValue")
- scalarAdj <- scalars |> rename_at(.vars=c(all_of(cols0)), ~cols1)
- scalarAdj <- scalarAdj |> mutate(year0 = year0 |> as.character())
- # rm("cols0", "cols1")
- # data$year |> class() |> print(); scalarAdj$year |> class() |> print()
+ scalars <- scalars |> pivot_longer(
+ -all_of(idCols0),
+ names_to = "econMultiplierName",
+ values_to = "econMultiplierValue"
+ ) ### End pivot_longer()
+  ### Set year0 to the minimum (base) year, converted to character
+ scalars <- scalars |> mutate(year0 = min(year) |> as.character())
+ # data |> glimpse(); scalars |> glimpse()
###### Format data and separate
- # data |> names() |> print(); scalarAdj |> names() |> print()
+ # data |> glimpse(); scalars |> glimpse()
data <- data |> mutate(econAdjName = econMultiplierName)
- df_none <- data |> filter(econMultiplierName == multiplier0)
- df_oth <- data |> filter(econMultiplierName != multiplier0)
- rm("data")
+ df_none <- data |> filter(econMultiplierName == none0)
+ data <- data |> filter(econMultiplierName != none0)
+ ### Which to do
+ hasNone0 <- df_none |> nrow()
+ hasOther0 <- data |> nrow()
###### ScalarName == "None"
### Columns
mutate0 <- c("econMultiplierValue", "econAdjValue")
drop0 <- c("econAdjName")
### Filter the data to those for which the scalar identifier == "none"...value = 1
### Set econMultiplierValue, econAdjValue == 1 if scalarMultiplierName=none
- if(nrow(df_none)) {df_none[,mutate0] <- 1}
- ###### Other Multipliers
- df_oth <- df_oth |> left_join(scalars , by=c(cols0[!(cols0 %in% mutate0)]))
- # df_oth |> names() |> print(); scalars |> names() |> print()
- # scalarAdj$year0 |> class() |> print(); df_oth$year0 |> class() |> print()
- df_oth <- df_oth |> left_join(scalarAdj, by=c(cols1[!(cols1 %in% mutate0)]))
+ if(hasNone0 ) {df_none[,mutate0] <- 1}
+ if(hasOther0) {
+ ###### Multiplier Adjustment
+ ### Rename scalars and convert year to character
+ # # rename0 <- c("year" , "econMultiplierName", "econMultiplierValue")
+ # # rename1 <- c("year0", "econAdjName" , "econAdjValue")
+ # # join0 <- rename0 |> (function(x){x[!(x %in% mutate0)]})() |> c("byState")
+ # # join1 <- rename1 |> (function(x){x[!(x %in% mutate0)]})() |> c("byState")
+ # rename0 <- c("econMultiplierName", "econMultiplierValue")
+ # rename1 <- c("econAdjName" , "econAdjValue")
+ # join0 <- rename0 |> (function(x){x[!(x %in% mutate0)]})() |> c("byState", "year")
+ # join1 <- rename1 |> (function(x){x[!(x %in% mutate0)]})() |> c("byState", "year0", "year", "econMultiplierName")
+ ### Scalar Adjustments
+ ## scalars |> glimpse()
+ # Take value at base year
+ rename0 <- c("econMultiplierName", "econMultiplierValue")
+ rename1 <- c("econAdjName" , "econAdjValue")
+ drop0 <- c("year")
+ drop1 <- c("econAdjValue")
+ join0 <- c("byState", "year0","econAdjName")
+ base_vals <- scalars |> filter(year == year0)
+ base_vals <- base_vals |> select(-all_of(drop0))|> rename_at(c(rename0), ~rename1)
+ scalarAdj <- scalars |> rename_at(c(rename0), ~rename1)
+ scalarAdj <- scalarAdj |> select(-all_of(drop1))
+ scalarAdj <- scalarAdj |> left_join(base_vals, by=c(join0), relationship = "many-to-many")
+ rm(rename0, rename1, drop0, drop1, join0)
+ # scalarAdj <- scalarAdj |> mutate(year0 = year0 |> as.character())
+ # data$year |> class() |> print(); scalarAdj$year |> class() |> print()
+
+ ###### Join with scalars
+ # scalars <- scalars |> left_join(scalarAdj, by = c("byState", "year0","year", "econMultiplierName"="econAdjName")) |> mutate(econAdjName = econMultiplierName)
+ join0 <- c("byState", "year0","year", "econAdjName")
+ # join1 <- c("econMultiplierName") |> c("year0", "year", "econMultiplierName", "byState")
+ join1 <- c("econMultiplierName") |> c("year0", "year", "econAdjName", "byState")
+ scalars <- scalars |> mutate(econAdjName = econMultiplierName)
+ # scalars |> glimpse(); scalarAdj |> glimpse(); data |> glimpse()
+ scalars <- scalars |> left_join(scalarAdj, by=c(join0))
+ data <- data |> left_join(scalars , by=c(join1))
+ } ### End if(hasOther0)
+
###### Rename value column
- df_x <- df_none |> rbind(df_oth) |> select(-c(all_of(drop0)))
+ data <- data |> rbind(df_none) #|> select(-all_of(drop0))
+ # data |> glimpse()
###### Return results values
- return(df_x)
+ return(data)
+}
+
+###### initialize_resultsDf ######
+### Initialize results data frame
+initialize_resultsDf <- function(
+ df_se, ### Dataframe with socioeconomic scenario
+ df_info, ### Dataframe with sectors info
+ df_scalars, ### Dataframe of main scalars
+ elasticity = NULL
+){
+ ###### By State ######
+ # if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+ stateCols0 <- c("state", "postal")
+ byState <- (stateCols0 %in% (df_se |> names())) |> any()
+ popCol <- byState |> ifelse("state_pop", "reg_pop")
+
+ ###### Adjust Data ######
+ ### Adjust scenario info for SLR sectors
+ # df_se <- df_se |> summarize_seScenario(national=T)
+ df_se <- df_se |> summarize_seScenario(national=F)
+
+ ###### Initialize Results ######
+ ### Initialized results: Join sector info with socioeconomic scenario
+ # df_se |> glimpse(); df_info |> glimpse(); df_scalars |> glimpse()
+ join0 <- c("byState")
+ df0 <- df_info |> left_join(df_se, by=c(join0), relationship="many-to-many")
+ rm(join0)
+ # df0 |> glimpse()
+
+ ###### Update Scalar Info ######
+ ### Update scalar info
+ ### Physical scalars
+ df0 <- df0 |> match_scalarValues(df_scalars, scalarType="physScalar")
+ ### Physical adjustment
+ df0 <- df0 |> match_scalarValues(df_scalars, scalarType="physAdj")
+ ### Damage adjustment
+ df0 <- df0 |> match_scalarValues(df_scalars, scalarType="damageAdj")
+ ### Economic scalar
+ df0 <- df0 |> match_scalarValues(df_scalars, scalarType="econScalar")
+
+ ###### Economic Adjustment Values ######
+ ### Get economic adjustment values
+ df_mult <- "co_econMultipliers" |> get_frediDataObj("frediData")
+ df0 <- df0 |> get_econAdjValues(scenario=df_se, multipliers=df_mult[["econMultiplierName"]])
+
+ ###### Calculate Scalars ######
+ ### Calculate scalars
+ df0 <- df0 |> calcScalars(elasticity = elasticity)
+
+ ###### SLR Scalars for Years > 2090 ######
+ ### Scalars for SLR past 2090
+ slrScalars <- "co_slrScalars" |> get_frediDataObj("frediData")
+ types0 <- df0[["modelType"]] |> unique() |> tolower()
+ refYear0 <- (slrScalars[["refYear"]] |> unique())[1]
+ has_slr <- "slr" %in% types0
+ maxYr0 <- df0[["year"]] |> max()
+ do_slr <- has_slr & (maxYr0 > refYear0)
+ if(do_slr){
+ ### Separate GCM & SLR values
+ df_gcm0 <- df0 |> filter(modelType |> tolower() != "slr")
+ df_slr0 <- df0 |> filter(modelType |> tolower() == "slr")
+
+ ### Filter to reference year
+ df_slr1 <- df_slr0 |> filter(year <= refYear0)
+ df_slr2 <- df_slr0 |> filter(year >= refYear0)
+ rm(df_slr0)
+
+ ### Get extended scalars
+ df_slr2 <- df_slr2 |> extend_slrScalars(
+ df_se = df_se,
+ df_scalars = df_scalars,
+ elasticity = elasticity
+ ) ### End extend_slrScalars
+ ### Ensure there are no duplicate years
+ df_slr2 <- df_slr2 |> filter(year > refYear0)
+
+ ### Add results back together
+ df_slr0 <- df_slr1 |> rbind(df_slr2)
+ df0 <- df_gcm0 |> rbind(df_slr0)
+ rm(df_slr1, df_slr2, df_slr0, df_gcm0)
+  } ### End if(do_slr)
+
+ ###### Return ######
+ ### Return
+ return(df0)
}
@@ -246,176 +800,276 @@ calcScalars <- function(
){
###### Calculate physical scalar ######
### Physical scalars are the product of the physical scalar, the physical adjustment, and the damage adjustment
- df_x <- data |> mutate(physScalar = physScalarValue * physAdjValue * damageAdjValue )
+ # data |> glimpse()
+ data <- data |> mutate(physScalar = physScalarValue * physAdjValue * damageAdjValue )
###### Adjust Elasticity for VSL ######
### Adjust Elasticity for VSL only
- if(!is.null(elasticity)){
- df_not_vsl <- df_x |> filter(econScalarName!="vsl_usd")
- df_vsl <- df_x |> filter(econScalarName=="vsl_usd") |> mutate(exp0 = elasticity)
- df_x <- df_not_vsl |> rbind(df_vsl); rm("df_not_vsl", "df_vsl")
- # if(is.numeric(elasticity)){df_x <- df_x |> mutate(exp0 = elasticity)}
+ if(!(elasticity |> is.null())){
+ data <- data |> mutate(exp0 = (econScalarName=="vsl_usd") |> ifelse(elasticity, exp0))
+ # if(is.numeric(elasticity)){data <- data |> mutate(exp0 = elasticity)}
}
- ###### Calculate economic adjustment ######
+ ###### Economic adjustments ######
### Economic multipliers are the economic multiplier value divided by the adjustment
### The economic multiplier value is 1, GDP, or GDP per capita
### The economic adjustment value is usually the economic multiplier value at a reference year
- df_x <- df_x |> mutate(econMultiplier = (econMultiplierValue / econAdjValue)**exp0 )
+ data <- data |> mutate(econMultiplier = (econMultiplierValue / econAdjValue)**exp0 )
- ###### Calculate economic scalar ######
+ ###### Economic scalars ######
### The economic scalar is calculated using the following equation.
### Constants c0, c1, and exp0 are from the
- df_x <- df_x |> mutate(econScalar = c0 + c1 * econScalarValue * (econMultiplier) )
+ data <- data |> mutate(econScalar = c0 + c1 * econScalarValue * (econMultiplier) )
- ###### Calculate economic-physical scalar ######
+ ###### Economic-physical scalar ######
### Combine the physical and economic scalar.
- df_x <- df_x |> mutate(physEconScalar = econScalar * physScalar )
+ data <- data |> mutate(physEconScalar = econScalar * physScalar )
###### Return ######
- return(df_x)
+ return(data)
}
-###### get_scenario_id ######
-### Function to standardize the creation of the scenario_id
-get_scenario_id <- function(
- data_x,
- include=c("model_dot", "region_dot") ### Character vector of column names to include
+###### get_gcmScaledImpacts ######
+get_gcmScaledImpacts <- function(
+ df0, ### Data frame of initial results
+ df1 ### Data frame of drivers
){
- ### Other vals
- mnl0 <- "\n" ### Message new line
- msg0 <- "\t" ### Message indent level 0
- mcom <- ", " ### Comma for collapsing lists
- mqu0 <- "\'" ### Message quote
- mqu1 <- mqu0 |> paste0(mcom, mqu0, collapse="")
- mend0 <- "..."
- ### Columns to include
- main0 <- c("sector", "variant", "impactYear", "impactType", "model_type")
- cols0 <- main0 |> c(include)
- ### Check names
- names0 <- data_x |> names()
- cCheck <- (cols0 %in% names0)
- nCheck <- (!cCheck) |> which() |> length()
- if(nCheck){
- c("In get_scenario_id:") |> c(mnl0, msg0) |>
- c("Data is missing columns ") |> c(mqu0, paste(cols0[!cCheck], collapse=mqu1), mqu0, mend0) |>
- c("Creating `scenario_id` from columns ") |> c(mqu0, paste(cols0[cCheck], collapse=mqu1), mqu0, mend0)
- ### New names
- cols0 <- cols0[cCheck]
+ ### By state
+ # df0 |> glimpse()
+ names0 <- df0 |> names()
+ byState <- "state" %in% names0
+ if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+ rm(names0)
+
+ ### Get df_imp0 & list_impactFunctions
+ sectors0 <- df0[["sector"]] |> unique()
+ df_imp0 <- "data_scaledImpacts" |> get_frediDataObj("stateData")
+ df_imp0 <- df_imp0 |> filter(sector %in% sectors0)
+
+ ### Drivers
+ df1 <- df1 |> filter(modelType == "gcm")
+ xVar0 <- df1[["modelUnitValue"]]
+ years0 <- df1[["year" ]]
+
+ ### Get list of unique impact functions
+ funList0 <- "list_impactFunctions" |> get_frediDataObj("stateData")
+ funNames0 <- funList0 |> names() |> unique()
+ df0 <- df0 |> mutate(hasScenario = scenario_id %in% funNames0)
+
+ ### Check whether the scenario has an impact function (scenarios with all missing values have no functions)
+ gcmFuns0 <- (df0 |> filter(hasScenario == 1))[["scenario_id"]] |> unique()
+ gcmNoFuns0 <- (df0 |> filter(hasScenario != 1))[["scenario_id"]] |> unique()
+ # df_gcm <- tibble(scenario_id = gcmFuns0)
+ hasFuns0 <- gcmFuns0 |> length() > 0
+ hasNoFuns0 <- gcmNoFuns0 |> length() > 0
+ # df_gcm <- df_gcm |> mutate(hasScenario = 1 * (scenario_id %in% funNames0))
+
+ # testIds0 <- (df_imp0 |> filter(sector=="WindDamage" & hasScenario==1))$scenario_id |> unique()
+ # testIds1 <- df0$scenario_id |> unique()
+ # (c("WindDamage_NA_NA_NA_Northeast_N/A_N/A_MIROC5", "WindDamage_NA_NA_NA_Northeast_N/A_N/A_CCSM4") %in% testIds0) |> print()
+ # (c("WindDamage_NA_NA_NA_Northeast_N/A_N/A_MIROC5", "WindDamage_NA_NA_NA_Northeast_N/A_N/A_CCSM4") %in% gcmFuns0) |> print()
+ # gcmFuns0[!(gcmFuns0 %in% testIds0)] |> head() |> print()
+ # testIds0[!(testIds0 %in% gcmFuns0)] |> head() |> print()
+
+ ### Initialize results
+ df0 <- tibble()
+ rename0 <- c("xVar")
+ rename1 <- c("modelUnitValue")
+ select0 <- c("scenario_id", "year", rename1, "scaled_impacts")
+
+ ### Get impacts for scenario_ids that have functions
+ if(hasFuns0){
+ ### Subset impact list
+ funList0 <- funList0 |> (function(x){x[ names(x) %in% gcmFuns0 ]})()
+ ### Get impacts
+ df_hasFuns <- funList0 |> interpolate_impacts(xVar = xVar0, years = years0)
+ df_hasFuns <- df_hasFuns |> rename_at(c(rename0), ~rename1) #|> filter(year>=minYear)
+ df_hasFuns <- df_hasFuns |> select(all_of(select0))
+ df0 <- df0 |> rbind(df_hasFuns)
+ rm(funList0, df_hasFuns)
+ } #; return(df_i)
+ if(hasNoFuns0){
+ ### Initialize values
+ df_noFuns0 <- tibble(scenario_id = gcmNoFuns0)
+ ### Join with driver values
+ join0 <- "joinCol"
+ df_noFuns0 <- df_noFuns0 |> mutate(joinCol=1) |>
+ left_join(
+ df1 |> mutate(joinCol=1),
+ by = c(join0)
+ ) |>
+ select(-all_of(join0))
+ ### Add scaled impact
+ df_noFuns0 <- df_noFuns0 |> mutate(scaled_impacts = NA)
+ df_noFuns0 <- df_noFuns0 |> select(all_of(select0))
+ # df0 |> names() |> print(); df_noFuns0 |> names() |> print()
+ df0 <- df0 |> rbind(df_noFuns0)
+ rm("df_noFuns0")
}
- scen_x <- data_x[,cols0]
- scen_x <- scen_x |> apply(1, function(x){as.vector(x) |> paste(collapse ="_")}) |> unlist()
- data_x <- data_x |> mutate(scenario_id = scen_x)
- return(data_x)
+
+ ### Arrange
+ arrange0 <- c("scenario_id", "year")
+ df0 <- df0 |> arrange_at(c(arrange0))
+
+ ### Return
+ return(df0)
}
+###### get_slrScaledImpacts ######
+get_slrScaledImpacts <- function(
+ df0, ### Initial results for SLR sectors
+ df1 ### Driver data frames
+ ){
+ ####### By State
+ ### By state
+ byState <- "state" %in% (df0 |> names())
+ if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
+
+ ###### Get rDataList Objects #######
+ ### Get objects from rDataLsit
+ df_ext0 <- "slrExtremes" |> get_frediDataObj("stateData")
+ df_imp0 <- "slrImpacts" |> get_frediDataObj("stateData")
+ co_modTypes <- "co_modelTypes" |> get_frediDataObj("frediData")
+ # co_models <- "co_models" |> get_frediDataObj("frediData")
+
+ ###### Values #######
+ slr0 <- "slr"
+ # co_modTypes |> glimpse()
+ co_modTypes <- co_modTypes |> rename(modelType = modelType_id)
+ slrMax0 <- (co_modTypes |> filter(modelType==slr0))[["modelMaxOutput"]][1]
+
+ ###### Format Data #######
+ ### Filter to appropriate driver values
+ df1 <- df1 |> filter(modelType |> tolower() == slr0) #|> rename(model_type=modelType)
+ # df1 |> glimpse()
-###### get_impactFunctions ######
-### Last updated 2021.02.05
-### Get Impact Functions (createSystemData)
-### This is used by createSystemData (see inst/extdata/createSystemData.R) to generate impact functions
-### This function can be run separately and its outputs saved as an R data object to facilitate computation time.
-get_impactFunctions <- function(
- x = NULL, ### Data frame with scaled impacts data
- groupCol = NULL, ### Which column to look for the scenario column name (default = temp_impact_scenario )
- xCol = NULL, ### Which column to use for x (default = temp_C)
- yCol = NULL, ### Which column to use for y
- # extrapolate = FALSE, ### Whether to extrapolate by default
- extend_from = NULL, ### Maximum value for model type to extend from, if not missing
- extend_to = NULL, ### Extend last points for x
- extend_all = FALSE, ### Whether to extend all models or just those that go to the max model value
- unitScale = NULL ### Scale between values
-){
- ###### Defaults ######
- unitScale <- ifelse(is.null(unitScale), 1, unitScale)
- # extend_to <- ifelse(is.null(extend_to ), 1, unitScale)
- ###### Group data ######
- x$group_id <- x[,groupCol]
- x$xIn <- x[, xCol]
- x$yIn <- x[, yCol]
- ###### Extend from/to ######
- ### Make sure they are numeric
- extend_from <- extend_from |> as.character() |> as.numeric()
- extend_to <- extend_to |> as.character() |> as.numeric()
-
- ###### Groups ######
- ### Column Names
- # names_x <- c(groupCol, xCol, yCol)
- ### Create groups and get group keys
- ### Group keys
- x <- x |> group_by(group_id)
- groups_x <- (x |> group_keys())$group_id |> unique()
-
- ### Initialize data
- xIn_min <- 0
- yIn_min <- 0
- df_0 <- data.frame(xIn = xIn_min, yIn = yIn_min)
- # df_0 <- data.frame(xIn = 0, yIn = 0)
- ###### Generate list of impact functions ######
- ### Iterate over the groups
- list_x <- x |>
- group_map(function(.x, .y, .keep=T){
- group_i <- .x[,groupCol] |> unique()
-
- ###### Subset values ######
- ### Subset data to scenario name and exclude NA values, then add a zero value
- df_i <- .x |> select(xIn, yIn) |> filter(!is.na(yIn))
- df_i <- df_0 |> rbind(df_i)
-
- ###### Information about Extrapolation values ######
- ### Length of df_i
- len_i <- df_i |> nrow()
- # ### Extend values out to 10 degrees of warming
- xIn_max <- df_i$xIn[len_i]
- yIn_max <- df_i$yIn[len_i]
- yMaxNew <- NA
-
- # extrapolate |> print(())
- ### Whether to extend values
- ### Extend values out to the specified value
- ### - Find linear relationship between last two points
- ### - Interpolate the last few values
- # extrapolate <- TRUE
- extrapolate <- (xIn_max == extend_from) & (extend_from!=extend_to)
- if(extend_all) extrapolate <- TRUE
- # extrapolate |> print()
- if(extrapolate){
- df_ref_i <- df_i[len_i + -1:0,]
- # df_ref_i |> print()
- ### Get linear trend
- lm_i <- lm(yIn~xIn, data=df_ref_i)
- ### Extend values
- df_new_i <- data.frame(xIn = seq(xIn_max + unitScale, extend_to, unitScale))
- df_new_i <- df_new_i |> mutate(yIn = xIn * lm_i$coefficients[2] + lm_i$coefficients[1])
- ### Bind the new observations with the other observations
- df_i <- df_i |> rbind(df_new_i)
- ### Sort and get new y value to extend to
- which_i <- (df_i$xIn == extend_to) |> which()
- yMaxNew <- df_i$yIn[which_i]
- }
-
- ###### Linear Interpolation ######
- ### Create a piece-wise linear interpolation function using approxfun and defaults
- ### rule = 1 (Returns NA for x-values outside range)
- ### ties = mean (take the average of multiple values)
- # fun_i <- approxfun(x = df_i$xIn, y = df_i$yIn, method = "linear", rule = 1)
- fun_i <- approxfun(
- x = df_i$xIn,
- y = df_i$yIn,
- method = "linear",
- yleft = yIn_min,
- yright = yMaxNew
- )
-
- return(fun_i)
-
- }) ### End group map
-
- ##### Add names to the list
- names(list_x) <- groups_x
-
- ###### Return Object ######
- return(list_x)
+ ### Filter to appropriate years
+ maxYear0 <- df1[["year"]] |> max()
+ df_ext0 <- df_ext0 |> filter(year <= maxYear0)
+ df_imp0 <- df_imp0 |> filter(year <= maxYear0)
+
+ ### Filter to appropriate sectors
+ sectors0 <- df0[["sector"]] |> unique()
+ df_ext0 <- df_ext0 |> filter(sector %in% sectors0)
+ df_imp0 <- df_imp0 |> filter(sector %in% sectors0)
+
+ ###### Get scenario_id #######
+ ### Join with df0 to get scenario_id
+ join0 <- c("sector", "variant", "impactType", "impactYear", "region") |> c(stateCols0) |> c("byState") |> c("year")
+ select0 <- join0 |> c("scenario_id")
+ drop0 <- join0 |> (function(x){x[!(x %in% c("year"))]})()
+ df0 <- df0 |> select(all_of(select0))
+ ### Join
+ # df0 |> glimpse(); df_ext0 |> glimpse()
+ # df0 |> group_by_at(c(join0)) |> summarize(n=n(),groups="keep") |> filter(n>1) |> glimpse()
+ # df_ext0 |> group_by_at(c(join0)) |> summarize(n=n(),groups="keep") |> filter(n>1) |> glimpse()
+ df_ext0 <- df_ext0 |> left_join(df0, by=c(join0))
+ df_imp0 <- df_imp0 |> left_join(df0, by=c(join0))
+ # "got here1" |> print(); df_ext0 |> glimpse(); df_imp0 |> glimpse()
+ ### Drop
+ df_ext0 <- df_ext0 |> select(-all_of(drop0))
+ df_imp0 <- df_imp0 |> select(-all_of(drop0))
+ rm(df0); rm(join0, select0, drop0)
+
+ ###### Add driver values #######
+ ### Join data with drivers
+ join0 <- c("year")
+ select0 <- c("scenario_id")
+ drop0 <- c("model_type")
+ df_max0 <- df1 |> left_join(df_ext0, by=c(join0))
+ df_slr0 <- df1 |> left_join(df_imp0, by=c(join0))
+ ### Drop columns
+ df_max0 <- df_max0 |> select(-any_of(drop0))
+ df_slr0 <- df_slr0 |> select(-any_of(drop0))
+ ### Relocate columns
+ df_max0 <- df_max0 |> relocate(all_of(select0))
+ df_slr0 <- df_slr0 |> relocate(all_of(select0))
+ rm(df_ext0, df_imp0);
+ rm(join0, select0, drop0)
+ # "got here1" |> print(); df_max0 |> glimpse(); df_slr0 |> glimpse()
+ # rm(select0, select1)
+
+ ###### Scaled Impacts >= Max #######
+ ### Filter to appropriate years
+ df_max0 <- df_max0 |> filter(modelUnitValue >= driverValue_ref)
+ maxYrs0 <- df_max0 |> get_uniqueValues(column="year")
+ nrow_max <- maxYrs0 |> length()
+
+ ## Calculate scaled impacts for values > slrMax0
+ if(nrow_max){
+ df_max0 <- df_max0 |> mutate(deltaDriver = modelUnitValue - driverValue_ref)
+ df_max0 <- df_max0 |> mutate(scaled_impacts = impacts_intercept + impacts_slope * deltaDriver)
+ # df_max0 |> filter(deltaDriver < 0) |> nrow() |> print()
+ } else{
+ df_max0 <- df_max0 |> mutate(scaled_impacts = NA)
+ } ### End if(nrow_max)
+
+ ###### Scaled Impacts < Max #######
+ ### Get impacts and create scenario ID for values <= slrMax0
+ ### Join with slrImpacts
+ df_slr0 <- df_slr0 |> filter(!(year %in% maxYrs0))
+ nrow_oth <- df_slr0 |> nrow()
+ # "got here2" |> print()
+
+ ### Group by cols
+ # cols0 <- c("modelType", "modelUnitValue")
+ # cols1 <- c("model_type", "driverValue")
+ cols0 <- c("modelUnitValue")
+ cols1 <- c("driverValue")
+ ### Group by cols
+ slr_names <- df_slr0 |> names()
+ select0 <- c("scenario_id")
+ group0 <- select0 |> (function(x){x[x %in% (df_slr0 |> names())]})()
+ group0 <- group0 |> c(cols1)
+ # rm("group0", "slr_names")
+
+ ### Calculate impacts for values not above the maximum value
+ # nrow_oth |> print()
+ if(nrow_oth){
+ #### Interpolate driver values
+ mutate0 <- c("lower_model", "upper_model")
+ slrVals0 <- df1 |> rename_at(c(cols0), ~cols1)
+ slrVals0 <- slrVals0 |> interp_slr_byYear()
+ slrVals0 <- slrVals0 |> mutate_at(c(mutate0), function(y){gsub(" ", "", y)})
+
+ ### Interpolate
+ # df_slr0 |> glimpse()
+ # df_slr0 <- df_slr0 |> rename_at(c(cols0), ~cols1)
+ df_slr0 <- df_slr0 |> fredi_slrInterp(slr_x=slrVals0, groupByCols=group0)
+ # df_slr0 <- df_slr0 |> rename_at(c(cols1), ~cols0)
+ rm(group0, slr_names, slrVals0)
+ rm(cols0, cols1)
+ } else{
+ df_slr0 <- df_slr0 |> mutate(scaled_impacts = NA)
+ } ### End if(nrow_oth)
+ # df_slr0 |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
+
+ ### Get scenario ID and adjust the model value
+ # df_max0 |> glimpse(); df_slr0 |> glimpse()
+ select1 <- select0 |> c("year", "modelUnitValue", "scaled_impacts") |> unique()
+ df_slr0 <- df_slr0 |> select(all_of(select1))
+ df_max0 <- df_max0 |> select(all_of(select1))
+ df_slr0 <- df_slr0 |> rbind(df_max0)
+ rm(df_max0, select1)
+
+ ###### Arrange ######
+ ### Add other results back in
+ # "got here7" |> print()
+ arrange0 <- c("scenario_id", "year")
+ select1 <- arrange0 |> c("modelUnitValue", "scaled_impacts")
+ df_slr0 <- df_slr0 |> select(all_of(select1))
+ df_slr0 <- df_slr0 |> arrange_at(c(arrange0))
+ # slrIds <- df_slr0[["scenario_id"]] |> unique() |> sort()
+ rm(select1, arrange0)
+ # slrIds |> head() |> print();
+
+ ###### Filter ######
+ # df_slr0 <- df_slr0 |> mutate(across("scenario_id",str_replace, '(\\d)[0-9]{1,3}cm', '\\Interpolation'))
+ df_slr0 <- df_slr0 |> filter(!(scaled_impacts |> is.na()))
+ # df_slr0 |> glimpse()
+
+ ###### Return ######
+ return(df_slr0)
}
@@ -434,19 +1088,19 @@ interpolate_impacts <- function(
numFunctions <- functions |> length()
### Iterate over the groups
- scaledImpacts_x <- 1:numFunctions |> lapply(function(i){
+ scaledImpacts_x <- 1:numFunctions |> map(function(i){
### Group, get group function, then get impacts
scenario_i <- functionNames[i]
fun_i <- functions[[scenario_i]]
scaledImpacts_i <- xVar |> fun_i()
- df_i <- data.frame(
+ df_i <- tibble(
year = years,
xVar = xVar,
scaled_impacts = scaledImpacts_i,
scenario_id = scenario_i
)
return(df_i)
- }) |> (function(i){do.call(rbind, i)})() ### End group map
+ }) |> bind_rows() ### End group map
# scaledImpacts_x |> names() |> print()
return(scaledImpacts_x)
}
@@ -459,471 +1113,262 @@ get_annual_model_stats <- function(
data = NULL, ### Dataframe of results
sectors = NULL, ### Name of sectors to get statistics for
yVar = "annual_impacts", ### Column to get averages for
- groupCol = c("sector", "variant", "model_type", "impactType", "impactYear") ### Column(s) to use for grouping
+ groupCols = c("sector", "variant", "impactType", "impactYear", "model_type") ### Column(s) to use for grouping
){
- ###### Ungroup data ######
- # data <- data |> ungroup()
- ###### Subset to sector ######
+ byState <- "state" %in% (data |> colnames()); if (byState) {stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
### Get unique sectors if none are specified
- defaultSectors <- data$sector |> as.character() |> unique()
- if(is.null(sectors)){sectors <- defaultSectors}
-
- ###### Names of Data ######
- ### Models
- model_labels <- data$model |> unique()
- num_models <- model_labels |> length()
-
- ###### Rename the columns ######
- ### To standardize
- renameCols <- c(yVar)
- newColNames <- c("yvar")
-
- ### Keep track of the data names, filter to the standardized data names, then rename the desired column
- # data |> names() |> print()
- data <- data |>
- (function(y){
- names_y <- y |> names()
- whichVar <- which(names_y == yVar)
- names(y)[whichVar] <- "yvar"
- return(y)
- })()
+ sectors0 <- data[["sector"]] |> as.character() |> unique()
+ if(sectors |> is.null()){sectors <- sectors0}
+
+ ### By State
+ cNames <- data |> colnames()
+ byState <- "state" %in% cNames
+
+ ### Rename columns
+ # data |> glimpse()
+ rename0 <- c(yVar)
+ rename1 <- c("yvar")
+ data <- data |> rename_at(c(rename0), ~rename1)
+ rm(rename0, rename1)
###### Which observations are NA ######
### Determine which observations are NA
- data <- data |>
- mutate(not_na = !is.na(yvar)) |>
- mutate(not_na = not_na * 1)
+ data <- data |> mutate(not_na = !(yvar |> is.na()))
+ data <- data |> mutate(not_na = not_na * 1)
### Model Type
- model_aves_x <- c("Model Average", "Interpolation") ### Labels for model averages
- model_type_x <- (data$model_type |> unique())[1]
- model_label_x <- ifelse(tolower(model_type_x)=="gcm", model_aves_x[1], model_aves_x[2])
+ modelAves0 <- c("Model Average", "Interpolation") ### Labels for model averages
+ modelType0 <- data[["model_type"]] |> unique() |> tolower()
+ modelType0 <- modelType0[1]
+ modelLbl0 <- (modelType0=="gcm") |> ifelse(modelAves0[1], modelAves0[2])
###### Reshape the data ######
- # default_groupCols <- c("sector", "variant", "model_type", "impactType", "impactYear", "region")
- default_groupCols <- c("sector", "variant", "model_type", "impactType", "impactYear", "region", "year", "model", "yvar")
- groupByCols <- default_groupCols[which(default_groupCols %in% names(data))]
+ # groupCols0 <- c("sector", "variant", "model_type", "impactType", "impactYear", "region")
+ # if(groupCols |> is.null()){groupCols <- groupCols0}
+ groupCols0 <- c("sector", "variant", "impactType", "impactYear", "region") |> c(stateCols0)
+ groupCols0 <- groupCols0 |> c("model", "model_type")
+ groupCols0 <- groupCols0 |> c("year", "yvar")
+ groupByCols <- groupCols0 |> (function(x){x[x %in% (data |> names())]})()
### Reshape the data and prepare a column indicating which rows have is.na() for all models
- data <- data |>
- select(c(all_of(groupByCols))) |>
- mutate(not_na = !is.na(yvar))
+ data <- data |> select(all_of(groupByCols))
+ data <- data |> mutate(notNA = !(yvar |> is.na()))
###### Summarize by group columns ######
### Add group column with year
- groupByCols <- groupByCols[which(!(groupByCols %in% c("model", "yvar")))]
- df_summary <- data |>
- group_by_at(c(all_of(groupByCols))) |>
- summarize_at(.vars = c("not_na"), sum, na.rm=T) |>
- rename(sum_notNA = not_na) |>
- mutate(
- sum_notNA = (sum_notNA > 0)*1,
- sum_notNA = sum_notNA |> na_if(0)
- )
+ group0 <- groupByCols |> (function(x){x[!(x %in% c("model", "yvar"))]})()
+ sum0 <- c("notNA" )
+ rename0 <- c("sum_notNA")
+ ### Summarize values
+ df_summary <- data |>
+ group_by_at(c(group0)) |>
+ summarize_at(c(sum0), sum, na.rm=T) |> ungroup()
+ ### Rename columns
+ df_summary <- df_summary |> rename_at(c(sum0), ~rename0)
+ df_summary <- df_summary |> mutate(sum_notNA = 1 * (sum_notNA > 0))
+ df_summary <- df_summary |> mutate(sum_notNA = sum_notNA |> na_if(0))
###### Add the summary back into the data ###### groupByCols
- data <- data |> left_join(df_summary, by = c(groupByCols))
+ data <- data |> left_join(df_summary, by = c(group0))
###### Calculate stats ######
### Separate observations that are all NA from those that have at least one non NA value
- is_naOnly <- data$sum_notNA |> is.na()
-
- ### Treat NA only values separate from those with non NA values
- ### First figure out which are which
- which_naOnly <- is_naOnly |> which()
- which_nMiss <- (!is_naOnly) |> which()
- ### Number of each
- num_naOnly <- which_naOnly |> length()
- num_nMiss <- which_nMiss |> length()
### Initialize dataframes
- data_naOnly <- data.frame()
- data_nMiss <- data.frame()
-
- if(num_naOnly > 0){
- data_naOnly <- data[which_naOnly,] |> select(-sum_notNA) |>
- mutate(min = NA, mean = NA, max=NA)
- }
-
- if(num_nMiss > 0){
- data_nMiss <- data[which_nMiss,] |> select(-sum_notNA) |>
- group_by_at(c(all_of(groupByCols))) |>
- summarize_at(.vars=c("yvar"), tibble::lst(min, mean, max), na.rm=T)
- }
-
- ###### Bind results together ######
- df_results <- data_nMiss |>
- rbind(data_naOnly) |>
- mutate(model=model_label_x) |>
- rename(modelMin = min, modelAve=mean, modelMax=max) |>
- ungroup()
+ ### Drop columns (define drop0 before it is used below)
+ drop0 <- c("sum_notNA")
+ data_naOnly <- data |> filter( sum_notNA |> is.na() ) |> select(-all_of(drop0))
+ data_nMiss <- data |> filter(!(sum_notNA |> is.na())) |> select(-all_of(drop0))
+ ### Number of rows
+ nrow_naOnly <- data_naOnly |> nrow()
+ nrow_nMiss <- data_nMiss |> nrow()
+ data <- data |> select(-all_of(drop0))
+ rm(drop0)
+
+ ### If there are values that are only NA, make values NA
+ if(nrow_naOnly){
+ ### Create the stat columns as NA (they do not exist yet, so mutate_at() would error)
+ data_naOnly <- data_naOnly |> mutate(min = NA, mean = NA, max = NA)
+ } ### End if(nrow_naOnly)
+
+ ### Otherwise, calculate stats
+ if(nrow_nMiss){
+ df_sum0 <- tibble::lst(min, mean, max)
+ sum0 <- c("yvar")
+ data_nMiss <- data_nMiss |>
+ group_by_at(c(group0)) |>
+ summarize_at(c(sum0), df_sum0, na.rm=T) |>
+ ungroup()
+ rm(df_sum0, sum0)
+ } ### End if(nrow_nMiss)
+
+ ### Bind values
+ df_results <- data_nMiss |> rbind(data_naOnly)
+ rm(data_nMiss, data_naOnly)
+
+ ###### Bind results ######
+ rename0 <- c("min", "mean", "max")
+ rename1 <- c("modelMin", "modelAve", "modelMax") ### Keep "modelAve" (not "modelMean") for downstream compatibility
+ df_results <- df_results |> mutate(model=modelLbl0)
+ df_results <- df_results |> rename_at(c(rename0), ~rename1)
+ rm(rename0, rename1)
###### Return ######
return(df_results)
}
-###### slr_Interp_byYear ######
+###### interp_slr_byYear ######
### utils for aggregate_impacts
-slr_Interp_byYear <- function(
+interp_slr_byYear <- function(
data, ### Driver scenario with columns year, slr_cm
yCol = "driverValue", ### Column to look for the driver value
silent=TRUE
){
###### Defaults ######
### Rename y Column
- if(is.null(yCol)){yCol <- "driverValue"}
+ if(yCol |> is.null()){yCol <- "driverValue"}
oldColName_y <- yCol |> c()
newColName_y <- "yValue" |> c()
newColRef_y <- newColName_y |> paste0("_ref")
- data <- data |> rename_at(.vars=c(all_of(oldColName_y)), ~newColName_y)
+ data <- data |> rename_at(c(oldColName_y), ~newColName_y)
### Messaging
- if(is.null(silent)){silent <- T}
- if(silent){msgUser <- F} else{msgUser <- T}
+ if(silent |> is.null()){silent <- T}
+ msgUser <- !silent
+
+ ###### Get Data Objects ######
+ co_models <- "co_models" |> get_frediDataObj("frediData")
+ slr_df <- "slr_cm" |> get_frediDataObj("frediData")
###### Assign data ######
### SLR scenario info
- assign("co_models", rDataList[["co_models"]])
+ # assign("co_models", rDataList[["frediData"]][["data"]][["co_models"]])
+ # co_models <- "co_models" |> get_frediDataObj("frediData")
co_slrs <- co_models |> filter(modelType=="slr") |> rename(model=model_label)
- slr_levels <- c("0cm", co_slrs$model_dot)
- slr_labels <- c("0 cm", co_slrs$model)
+ slr_levels <- c("0cm" ) |> c(co_slrs[["model_dot"]])
+ slr_labels <- c("0 cm") |> c(co_slrs[["model"]])
slr_orders <- slr_levels |> factor(levels=slr_levels) |> as.numeric()
slr_min <- (slr_orders |> min(na.rm=T)) #+ 1
slr_max <- slr_orders |> max(na.rm=T)
### Sea level rise information
- assign("slr_df", rDataList[["slr_cm"]])
- df_slr_years <- slr_df$year |> unique()
+ # assign("slr_df", rDataList[["slr_cm"]])
+ df_slr_years <- slr_df[["year"]] |> unique()
### Refactor model
- slr_df <- slr_df |>
- mutate(model = model |> as.character()) |>
- mutate(model_factor = model |> factor(slr_levels, slr_labels)) |>
- mutate(model_level = model_factor |> as.numeric()) |>
- arrange_at(.vars=c("model_level", "year")) |>
- mutate(model = model_factor |> as.character()) |>
- select(-c("model_factor")) |> as.data.frame()
+ slr_df <- slr_df |> mutate(model = model |> as.character())
+ slr_df <- slr_df |> mutate(model_factor = model |> factor(slr_levels, slr_labels))
+ slr_df <- slr_df |> mutate(model_level = model_factor |> as.numeric())
+ slr_df <- slr_df |> arrange_at(c("model_level", "year"))
+ slr_df <- slr_df |> mutate(model = model_factor |> as.character())
+ slr_df <- slr_df |> select(-c("model_factor"))
### Character vector of model names
c_slrs0 <- slr_labels
### Check that years are unique
- data_years <- data$year |> unique()
+ data_years <- data[["year"]] |> unique()
n_data_years <- data_years |> length()
nrows_data <- data |> nrow()
- check_unique_years <- nrows_data > n_data_years
-
- if(check_unique_years){
- if(msgUser){
- message("\t", "values for 'yCol' are not unique...")
- message("\t", "Averaging over 'yCol' values...")
- }
- data <- data |> group_by_at(c("year")) |> summarize_at(c("yValue"), mean, na.rm=T)
- }
- rm("n_data_years", "nrows_data", "check_unique_years")
+ flag0 <- nrows_data > n_data_years
+ ### Message user
+ if(flag0){
+ if(msgUser){"\t" |> paste0("values for 'yCol' are not unique...") |> message()}
+ if(msgUser){"\t" |> paste0("Averaging over 'yCol' values...") |> message()}
+ data <- data |> group_by_at(c("year")) |> summarize_at(c("yValue"), mean, na.rm=T) |> ungroup()
+ } ### End if(flag0)
+ rm(n_data_years, nrows_data, flag0)
###### Prepare data ######
### Filter to appropriate years
- data <- data |> filter(year %in% df_slr_years)
- n_years <- data |> nrow()
+ data <- data |> filter(year %in% df_slr_years)
+ n_years <- data |> nrow()
+ ### Figure if years are missing
+ dataYrs <- data[["year"]]
###### Standard Columns ######
### JoinCols
join0 <- c("year", "model_level")
- select0 <- c("year", newColName_y, newColRef_y, "model")
- select1 <- c("year", newColName_y, "lower_model", "upper_model", "lower_slr", "upper_slr")
+ select0 <- c("year") |> c(newColName_y, newColRef_y) |> c("model")
+ select1 <- c("year") |> c(newColName_y) |> c("lower_model", "upper_model", "lower_slr", "upper_slr")
### Format data
- # y <- y |> mutate(model_factor = model_factor |> as.character())
- x <- data; rm("data")
- y <- slr_df |> rename(yValue_ref = driverValue); rm("slr_df")
+ slr_df <- slr_df |> rename(yValue_ref = driverValue)
### Join
- z <- x |> left_join(y, by = "year")
- ### Filter observations
- z_lo <- z |> filter(yValue_ref <= yValue); #n_lo <- z_lo |> nrow()
- z_hi <- z |> filter(yValue_ref >= yValue); #n_hi <- z_hi |> nrow()
- ### Figure if years are missing
- yrs_z <- x$year
- yrs_lo <- z_lo$year |> unique() |> sort(); nas_lo <- yrs_z[!(yrs_z %in% yrs_lo)]
- yrs_hi <- z_hi$year |> unique() |> sort(); nas_hi <- yrs_z[!(yrs_z %in% yrs_hi)]
- ### Add years to data
- dfNaLo <- data.frame(year = yrs_lo, model_level = slr_min) |> left_join(z, by = c(all_of(join0)))
- dfNaHi <- data.frame(year = yrs_lo, model_level = slr_max) |> left_join(z, by = c(all_of(join0)))
- ### Add missing values back in
- z_lo <- z_lo |> rbind(dfNaLo) |> arrange_at(.vars=c(join0[1]))
- z_hi <- z_hi |> rbind(dfNaHi) |> arrange_at(.vars=c(join0[1]))
- ### Get low values
- x_lo <- z_lo |>
- group_by_at(.vars=c("year")) |>
- summarize_at(.vars=c("model_level"), max, na.rm=T) |> ungroup() |>
- (function(a, b = x){
- b |> left_join(a, by = c("year"))
- })() |>
- left_join(y, by=c("year", "model_level")) |>
- select(c(all_of(select0))) |>
- rename(lower_slr = yValue_ref, lower_model = model)
-
- ### Get hi values
- x_hi <- z_hi |>
- group_by_at(.vars=c("year")) |>
- summarize_at(.vars=c("model_level"), min, na.rm=T) |> ungroup() |>
- (function(a, b = x){
- b |> left_join(a, by = c("year"))
- })() |>
- left_join(y, by=c("year", "model_level")) |>
- select(c(all_of(select0))) |>
- rename(upper_slr = yValue_ref, upper_model = model)
+ df_new <- data |> left_join(slr_df, by = "year")
+ ### Filter to specific observations
+ df_lo <- df_new |> filter(yValue_ref <= yValue)
+ df_hi <- df_new |> filter(yValue_ref >= yValue)
+ ### Get unique years
+ yrs_lo <- df_lo[["year"]] |> unique() |> sort()
+ yrs_hi <- df_hi[["year"]] |> unique() |> sort()
+ ### Figure out which years are missing
+ nas_lo <- dataYrs[!(dataYrs %in% yrs_lo)]
+ nas_hi <- dataYrs[!(dataYrs %in% yrs_hi)]
+ ### Create tibbles of missing years
+ dfNaLo <- tibble(year = nas_lo, model_level = slr_min) |> left_join(df_new, by = c(join0))
+ dfNaHi <- tibble(year = nas_hi, model_level = slr_max) |> left_join(df_new, by = c(join0))
+ ### Bind missing years back in
+ df_lo <- df_lo |> rbind(dfNaLo) |> arrange_at(c(join0[1]))
+ df_hi <- df_hi |> rbind(dfNaHi) |> arrange_at(c(join0[1]))
+ ### Drop values
+ rm(yrs_lo, yrs_hi, nas_lo, nas_hi, dfNaLo, dfNaHi); rm(df_new)
+
+ ### Summarize values
+ ### Columns
+ group0 <- c("year")
+ sum0 <- c("model_level")
+ join0 <- c("year")
+ join1 <- join0 |> c(sum0)
+ rename0 <- c("yValue_ref", "model")
+ rename1 <- c("lower_slr", "lower_model")
+ rename2 <- c("upper_slr", "upper_model")
+
+ ### Get lower values
+ ### - Summarize
+ df_lo <- df_lo |> group_by_at(c(group0)) |>
+ summarize_at(c(sum0), max, na.rm=T) |> ungroup()
+ ### - Join
+ df_lo <- data |> left_join(df_lo, by=c(join0))
+ df_lo <- df_lo |> left_join(slr_df , by=c(join1))
+ ### - Select & rename
+ df_lo <- df_lo |> select(all_of(select0))
+ df_lo <- df_lo |> rename_at(c(rename0), ~rename1)
+
+ ### Get upper values
+ ### - Summarize
+ df_hi <- df_hi |>
+ group_by_at (c(group0)) |>
+ summarize_at(c(sum0), min, na.rm=T) |> ungroup()
+ ### - Join
+ df_hi <- data |> left_join(df_hi, by=c(join0))
+ df_hi <- df_hi |> left_join(slr_df , by=c(join1))
+ ### - Select & rename
+ df_hi <- df_hi |> select(all_of(select0))
+ df_hi <- df_hi |> rename_at(c(rename0), ~rename2)
+ ### Remove values
+ rm(group0, sum0, join1, rename0, rename1, rename2)
+ rm(slr_df)
### Join all
- z <- x_lo |> left_join(x_hi, by = c("year", all_of(newColName_y)))
- z <- z |> select(c(all_of(select1)))
+ join0 <- c("year") |> c(newColName_y)
+ data <- df_lo |> left_join(df_hi, by=c(join0))
+ data <- data |> select(all_of(select1))
+ rm(df_lo, df_hi)
### Add adjustment
- z <- z |>
- mutate(denom_slr = upper_slr - lower_slr ) |>
- mutate(numer_slr = upper_slr - yValue) |>
- mutate(adj_slr = numer_slr / denom_slr ) |>
- mutate(is_inf = adj_slr |> is.infinite()) |>
- mutate(adj_slr = adj_slr * (!is_inf)) |>
- mutate(adj_slr = adj_slr |> replace_na(0)) |>
- select(-c("is_inf"))
+ data <- data |> mutate(denom_slr = upper_slr - lower_slr )
+ data <- data |> mutate(numer_slr = upper_slr - yValue)
+ data <- data |> mutate(adj_slr = numer_slr / denom_slr )
+ data <- data |> mutate(is_inf = adj_slr |> abs() |> is.infinite())
+ data <- data |> mutate(adj_slr = adj_slr * (!is_inf))
+ data <- data |> mutate(adj_slr = adj_slr |> replace_na(0))
+ data <- data |> select(-c("is_inf"))
### Rename yValue and return
- df_return <- z |> rename_at(.vars=c(all_of(newColName_y)), ~oldColName_y)
- return(df_return)
-
-}
-
-###### fun_slrModel2Height ######
-### Helper function to convert SLR model to height in cm
-fun_slrModel2Height <- function(
- col_x, ### column "model_dot"
- include = c("factor", "values"),
- valType = c("numeric", "character", "factor"),
- labelType = c("numeric", "character") ### Used for factor or label
+ data <- data |> rename_at(c(newColName_y), ~oldColName_y)
+ return(data)
-){
- ### Checks
- do_factor <- "factor" %in% include
- do_values <- "values" %in% include
- do_both <- do_factor & do_values
- ### Value types and priority
- valTypes <- c("numeric", "character", "factor")
- valType0 <- valType
- valType0 <- valTypes |> (function(y, types_y=valTypes){
- ls1 <- ls0 <- types_y
- c0 <- ls0[1] %in% y
- c1 <- ls0[2] %in% y
- c3 <- ls0[2] %in% y
- if(c0) {ls1 <- ls0[1]}
- else if(c1) {ls1 <- ls0[2]}
- else {ls1 <- ls0[3]}
- return(ls1)
- })()
- do_numb <- "numeric" %in% valType
- do_char <- "character" %in% valType
- do_fact <- "factor" %in% valType
- # valType |> print(); labelType |> print()
- ### Label types and priority
- labTypes <- c("numeric", "character")
- label_x0 <- labelType |>
- (function(y, types_y=labTypes){
- ls1 <- ls0 <- types_y
- c0 <- do_numb | do_char
- c1 <- ls0[1] %in% y
- if(c0) {ls1 <- ls0[1]}
- else if(c1) {ls1 <- ls0[1]}
- else {ls1 <- ls0[2]}
- return(ls1)
- })()
- # label_x0 |> print()
- labChar <- "character" %in% label_x0
- # label_x0 |> print(); labChar |> print()
- ### Original labels
- lvl_x0 <- col_x |> unique()
- df_x0 <- data.frame(levels=lvl_x0)
- ### Standardize
- df_x0$labels <- gsub("_" , "", df_x0$levels)
- df_x0$numbers <- gsub("cm", "", df_x0$labels)
- df_x0$values <- df_x0$numbers |> as.character() |> as.numeric()
- ### Sprt
- df_x0 <- df_x0 |> arrange_at(.vars=c("values"))
- ### Create factor list
- list_x <- list(factors=df_x0)
- ### Adjust values
- vals_x <- NULL
- if(do_values){
- if(labChar){labels_x <- df_x0$labels}
- else {labels_x <- df_x0$values}
- vals_x <- col_x |> factor(levels=df_x0$levels, labels=labels_x)
- if(do_char){vals_x <- vals_x |> as.character()}
- if(do_numb){vals_x <- vals_x |> as.numeric()}
- list_x[["values"]] <- vals_x
- }
- ### Return list
- if (do_both ) {return_x <- list_x}
- else if(do_factor) {return_x <- list_x$factors}
- else {return_x <- list_x$values}
- ### Return
- return(return_x)
}
-###### SLR Extremes ######
-### Function for dealing with SLR values above the maximum
-fun_slrConfigExtremes <- function(
- slr_x, ### rDataList$slr_cm
- imp_x ### rDataList$slrImpacts
-){
- ### Vectors
- cYear0 <- c("year")
- cDriver0 <- c("driverValue")
- cImpact0 <- c("scaled_impacts")
- cSlr0 <- c("model_cm")
- modCols0 <- c("model", "model_dot", "model_type")
- cOrder0 <- c("valueType")
- ### Arrange columns
- arrange0 <- c(cDriver0, cSlr0)
- arrange1 <- c(cImpact0, cSlr0)
- ### Other cols slr_x
- slrCols0 <- slr_x |> names() |> (function(x){x[!(x %in% c(arrange0, modCols0, cYear0))]})() |> c(cYear0)
- impCols0 <- imp_x |> names() |> (function(x){x[!(x %in% c(arrange1, modCols0, cYear0))]})() |> c(cYear0)
- # slrCols0 |> print(); impCols0 |> print()
- ### Join Cols
- join0 <- c(cYear0, cSlr0)
- # join1 <- c(impCols0)
- group00 <- c(impCols0, cDriver0)
- group0 <- c(group00, cOrder0)
- group1 <- c(group0, cImpact0)
- ### Other columns
- rename0 <- c(cDriver0, cImpact0, cSlr0)
- suffix0 <- c("1", "2")
- bounds0 <- c("lower", "upper")
- drop0 <-
- c(cDriver0 |> paste0(suffix0)) |>
- c(cImpact0 |> paste0(suffix0)) |>
- c(cSlr0 |> paste0(suffix0)) |>
- c("delta_impacts", "delta_driverValue")
- ### Prepare data
- ### SLR Heights: slr_df; SLR Impacts: imp_df
- slr_df <- slr_x |> mutate(model_cm = model_dot |> fun_slrModel2Height(include="values"))
- imp_df <- imp_x |> mutate(model_cm = model_dot |> fun_slrModel2Height(include="values"))
- rm("slr_x", "imp_x")
- # slr_df |> head() |> glimpse(); imp_df |> head() |> glimpse()
-
- ### Get upper and lower for each year
- slrYears <- slr_df$year |> unique() |> sort()
- slr_extr <- slrYears |> lapply(function(
- year_i, data_x = slr_df, data_y = imp_df
- ){
- ### Filter data
- dfx_i <- data_x |> filter(year==year_i) |> select(-c(all_of(modCols0)))
- dfy_i <- data_y |> filter(year==year_i) |> select(-c(all_of(modCols0)))
- # if(year_i %in% slrYears[1]){"got here1" |> print(); dfx_i |> head() |> glimpse()}
-
- ### Driver values:
- ### - Get driver values and then unique driver values
- ### - Figure out which the last values belong to
- vals_i <- dfx_i$driverValue |> unique() |> sort(decreasing=TRUE)
- ### Add value
- addVal_i <- length(vals_i) == 1
- if(addVal_i){vals_i <- vals_i |> rep(2)}
- ref_i <- data.frame(
- year = year_i,
- driverValue = vals_i[1:2],
- valueType = bounds0[2:1]
- )
- # if(year_i %in% slrYears[1]){"got here2" |> print(); ref_i |> head() |> glimpse()}
- ### Filter dfx_i to driver values %in% first_i and add an order
- dfx_i <- dfx_i |> left_join(ref_i, by=c(all_of(cYear0), all_of(cDriver0)))
- dfx_i <- dfx_i |> filter(!is.na(valueType))
- # if(year_i %in% slrYears[1]){"got here2" |> print(); dfx_i |> head() |> glimpse()}
- # rm("vals_i", "ref_i")
-
- ### Join impacts with ref_i
- # if(year_i %in% slrYears[1]){dfy_i |> filter(is.na(sector)) |> nrow() |> print()}
- ### Join with driver values
- dfy_i <- dfy_i |> left_join(dfx_i, by = c(all_of(join0)))
- dfy_i <- dfy_i |> filter(!is.na(valueType))
- # if(year_i %in% slrYears[1]){"got here3" |> print(); dfy_i |> head() |> glimpse()}
- ### Get maximum impacts
- df_i <- dfy_i |>
- group_by_at(.vars=c(all_of(group0))) |>
- summarize_at(.vars=c(all_of(cImpact0)), max, na.rm=T) |> ungroup()
- ### Filter to maximum impacts and get associated model
- df_i <- df_i |> left_join(dfy_i, by = c(all_of(group1)))
- df_i <- df_i |>
- group_by_at(.vars=c(all_of(group1))) |>
- summarize_at(.vars=c(all_of(cSlr0)), max, na.rm=T) |> ungroup()
- # if(year_i %in% slrYears[1]){"got here4" |> print(); df_i |> head() |> glimpse()}
- return(df_i)
- }) |> (function(df_i){do.call(rbind, df_i)})()
- ### Select and arrange
- # slr_extr |> names() |> print()
- slr_extr <- slr_extr |> select(c(all_of(group1), all_of(cSlr0)))
- slr_extr <- slr_extr |> arrange_at(.vars=c(all_of(group1)))
- ### Spread lower and upper values and join them together
- slr_extr <- slr_extr |> (function(data_x){
- ### Separate into lower and upper values and join data
- data_lo <- data_x |> filter(valueType=="lower") |> select(-c(all_of(cOrder0)))
- data_up <- data_x |> filter(valueType!="lower") |> select(-c(all_of(cOrder0)))
- ### Rename columns
- data_lo <- data_lo |> rename_at(.vars=c(all_of(rename0)), ~paste0(rename0, "1"))
- data_up <- data_up |> rename_at(.vars=c(all_of(rename0)), ~paste0(rename0, "2"))
- data_x <- data_up |> left_join(data_lo, by = c(all_of(impCols0)))
- # data_x |> names() |> print()
-
- ### Calculate differences
- data_x <- data_x |> mutate(delta_impacts = scaled_impacts2 - scaled_impacts1)
- data_x <- data_x |> mutate(delta_driverValue = driverValue2 - driverValue1)
- ### Calculate absolute values
- data_x <- data_x |> mutate(delta_impacts = delta_impacts |> abs())
- data_x <- data_x |> mutate(delta_driverValue = delta_driverValue |> abs())
- ### Calculate slope and intercept
- data_x <- data_x |> mutate(driverValue_ref = ifelse(driverValue2 > driverValue1 , driverValue2 , driverValue1))
- data_x <- data_x |> mutate(impacts_intercept = ifelse(scaled_impacts2 > scaled_impacts1, scaled_impacts2, scaled_impacts1))
- data_x <- data_x |> mutate(impacts_slope = delta_impacts/delta_driverValue)
- # ### Check values
- # data_x |> filter(driverValue2 < driverValue1 ) |> nrow() |> print() ### 0
- # data_x |> filter(scaled_impacts2 < scaled_impacts1) |> nrow() |> print() ### 467
- # data_x |> filter(impacts_slope < 0) |> nrow() |> print() ### 467
- # data_x |> filter(impacts_slope < 0 & impacts_intercept < 0) |> nrow() |> print()
- ### Replace zeros
- which0 <- (data_x$delta_driverValue == 0) |> which()
- data_x$impacts_slope[which0] <- 0
- # data_x |> names() |> print()
- ### Drop intermediate columns and return
- data_x <- data_x |> select(-c(all_of(drop0)))
- return(data_x)
- })()
- # # ext_slr |> glimpse() |> print();
- # ### Format and Return
- # # slr_extr |> names() |> print()
- # slr_extr <- slr_extr |> arrange_at(.vars=c(all_of(impCols0)))
- # slr_extr[,c(modCols0, cSlr0)] <- "Interpolation"
- return(slr_extr)
-}
-
-####### Extend SLR Scenarios ######
-### Function to extend SLR scenarios in createSystemData
-extend_slr <- function(
- x,
- # maxYear_x = 2090,
- newMax_x = 2300,
- arrange_x = c("model", "year")
-){
- ### Values
- maxYear_x <- x$year |> max()
- ### Format Dataframes
- x_nu <- data.frame(year = (maxYear_x + 1):newMax_x) |> mutate(dummyCol = 1)
- x_up <- x |> filter(year == maxYear_x) |> mutate(dummyCol = 1) |> select(-c("year"))
- x_lo <- x |> filter(year <= maxYear_x)
- rm("x")
- ### Join data
- x_up <- x_up |> left_join(x_nu, by = c("dummyCol")) |> select(-c("dummyCol"))
- x <- x_lo |> rbind(x_up)
- rm("x_nu", "x_up", "x_lo")
- ### Arrange and standardize model type
- x <- x |> arrange_at(.vars = c(all_of(arrange_x))) |> mutate(model_type = "slr")
- return(x)
-}
####### fun_getNeighbors ######
### Figure out which SLR heights are immediately above and below a driver value
@@ -935,29 +1380,26 @@ fun_getNeighbors <- function(
# ### Mutate data
### Add a dummy column with a standardized name
- values <- values |> as.data.frame()
- values$newCol <- values[,col]
+ values$newCol <- values[[col]]
### Look for equal values
values_equal <- values |> filter(newCol==x)
num_equal <- values_equal |> nrow()
### If there are equal values, get the upper and lower value
+ ### If there are no equal values, figure if there are any values below the value
if(num_equal>0){
### If there is only one value that is equal, return that value twice
+ ### If there is more than one value that is equal, return the lower most and uppermost equal values
if(num_equal==1){
lo_order <- values_equal$order |> unique()
hi_order <- lo_order
- }
- ### If there is more than one value that is equal, return the lower most and uppermost equal values
- else{
- c_orders <- values_equal$order
- lo_order <- values_equal$order |> min(na.rm=T)
- hi_order <- values_equal$order |> max(na.rm=T)
- }
- }
- ### If there are no equal values, figure if there are any values below the value
- else{
+ } else{
+ c_orders <- values_equal[["order"]]
+ lo_order <- values_equal[["order"]] |> min(na.rm=T)
+ hi_order <- values_equal[["order"]] |> max(na.rm=T)
+ } ### End if(num_equal==1)
+ } else{
values_below <- values |> filter(newCol < x)
num_below <- values_below |> nrow()
@@ -972,163 +1414,112 @@ fun_getNeighbors <- function(
hi_order <- values_above$order |> min(na.rm=T)
} else{
### Figure out if there are any values above it
+ ### - Return the max value for the low value and the hi value
+ ### - Otherwise, return the max value for the low value and the hi value
if(num_above==0){
- ### Return the max value for the low value and the hi value
- lo_order <- values_below$order |> max(na.rm=T)
+ lo_order <- values_below[["order"]] |> max(na.rm=T)
hi_order <- lo_order
- }
- ### If there are some numbers above and below it
- else{
- ### Return the max value for the low value and the hi value
- lo_order <- values_below$order |> max(na.rm=T)
- hi_order <- values_above$order |> min(na.rm=T)
- }
- }
-
- }
+ } else{
+ lo_order <- values_below[["order"]] |> max(na.rm=T)
+ hi_order <- values_above[["order"]] |> min(na.rm=T)
+ } ### End if(num_above==0)
+ } ### End if(num_below==0)
+ } ### End if(num_equal>0)
# lo_order |> print()
lo_values <- values |> filter(order==lo_order) |> mutate(type="lower")
hi_values <- values |> filter(order==hi_order) |> mutate(type="upper")
new_values <- lo_values |> rbind(hi_values)
-
new_values <- lo_values |> rbind(hi_values)
- return(new_values)
-}
-
-###### fun_formatScalars ######
-### Function to format scalars in createSystemData
-fun_formatScalars <- function(
- data_x, ### rDataList$scalarDataframe
- info_x, ### rDataList$co_scalarInfo
- years_x ### rDataList$list_years
-){
- names_x <- data_x$scalarName |> unique()
- num_x <- names_x |> length()
- new_x <- 1:num_x |> lapply(function(i){
- ### Figure out name, method, region
- name_i <- names_x[i]
- # name_i |> print()
- ### Dataframes
- scalar_i <- data_x |> filter(scalarName==name_i)
- info_i <- info_x |> filter(scalarName==name_i)
- ### Info about scalar
- method_i <- info_i$constant_or_dynamic[1]
- method_i <- ifelse(method_i=="constant", method_i, "linear")
- # region_i <- info_i$national_or_regional[1]
- ### Interpolate data
- scalar_i <- scalar_i |> interpolate_annual(
- years = years_x,
- column = "value", rule = 1:2,
- method = method_i)
- ### Add in name and return
- scalar_i <- scalar_i |> mutate(scalarName=name_i)
- return(scalar_i)
- }) |> (function(scalars_i){do.call(rbind, scalars_i)})()
- ### Join info
- select0 <- c("scalarName", "scalarType", "national_or_regional")
- select1 <- c("scalarName", "region", "year", "value")
- info_x <- info_x |> select(c(all_of(select0)))
- new_x <- new_x |> select(c(all_of(select1)))
- new_x <- new_x |> left_join(info_x, by=c("scalarName"))
### Return
- return(new_x)
+ return(new_values)
}
-
###### fun_getScale ######
### This function creates a set of breaks for a particular column
### It returns a list of breaks, the power of 10, and the limits
-fun_getScale <-
- function(
+fun_getScale <- function(
data,
scaleCol = "driverValue",
# zero = F,
nTicks = 5
){
-
### Defaults
- if(is.null(scaleCol)) scaleCol <- "driverValue"
+ if(scaleCol |> is.null()){scaleCol <- "driverValue"}
### Default is not to zero out in case there are negative numbers
# if(is.null(zero)) zero <- F
- if(is.null(nTicks)){nTicks <- 5}
+ if(nTicks |> is.null()){nTicks <- 5}
### Min/max values
- data <- data |> as.data.frame()
- xMin <- data[,scaleCol] |> min(na.rm=T)
- xMax <- data[,scaleCol] |> max(na.rm=T)
+ xMin <- data[[scaleCol]] |> min(na.rm=T)
+ xMax <- data[[scaleCol]] |> max(na.rm=T)
### Set minimum to zero unless the minimum is less than zero
- if(xMin > 0){
- xMin <- 0
- }
-
- if(xMax < 0){
- xMax <- 0
- }
+ if(xMin > 0){xMin <- 0}
+ if(xMax < 0){xMax <- 0}
### Min/max values
### Limit names, values, bounds, etc
- df_minMax <-
- data.frame(
- name = c("min", "max"),
- value = c(xMin, xMax),
- bound = c(floor(xMin), ceiling(xMax)),
- boundType = c("floor", "ceiling")
- ) |>
- ### Absolute value, Power of 10 and y-scale info
- mutate(bound_abs = bound |> abs()) |>
- ### Calculate log 10 and replace values of infinity with 0
- mutate(log10 = (bound_abs |> log10())) |>
- mutate(log10 = log10 |> abs() |> na_if(Inf)) |>
- mutate(log10 = log10 |> replace_na(0)) |>
- ### Then get the floor of the log10 value
- mutate(power10 = log10 |> floor())
+ df_minMax <- tibble(
+ name = c("min", "max"),
+ value = c(xMin, xMax),
+ bound = c(floor(xMin), ceiling(xMax)),
+ boundType = c("floor", "ceiling")
+ ) ### End tibble
+
+ ### Absolute value, Power of 10 and y-scale info
+ df_minMax <- df_minMax |> mutate(bound_abs = bound |> abs())
+ ### Calculate log 10 and replace values of infinity with 0
+ df_minMax <- df_minMax |> mutate(log10 = (bound_abs |> log10()))
+ df_minMax <- df_minMax |> mutate(log10 = log10 |> abs() |> na_if(Inf))
+ df_minMax <- df_minMax |> mutate(log10 = log10 |> replace_na(0))
+ ### Then get the floor of the log10 value
+ df_minMax <- df_minMax |> mutate(power10 = log10 |> floor())
### Get maximum power of 10, then scale to zero for negative numbers
### Integer division of power of 10 by 3 to get number of thousands
### Then get the modulus of the thousands
- x_power10Max <- df_minMax$power10 |> max(na.rm=T)
+ x_power10Max <- df_minMax[["power10"]] |> max(na.rm=T)
x_power1000 <- x_power10Max %/% 3
x_mod1000 <- x_power10Max %% 3
### Rounded bounded values (round to 1 essentially)
divideByPower <- x_power10Max - 1
- minMax_scaled <- df_minMax$value / 10^divideByPower
- bounds_scaled_rounded <- c(floor(minMax_scaled[1]), ceiling(minMax_scaled[2]))
- bounds_rounded <- bounds_scaled_rounded * 10^divideByPower
+ minMax_scaled <- df_minMax[["value"]] / 10**divideByPower
+ bounds_scaled_rounded <- minMax_scaled[1] |> floor() |> c(minMax_scaled[2] |> ceiling())
+ bounds_rounded <- bounds_scaled_rounded * 10**divideByPower
- ###### Establish the range of x ######
+ ###### Establish the range of x
x_range <- bounds_rounded
- x_range_p10 <- x_range / 10^x_power10Max
+ x_range_p10 <- x_range / 10**x_power10Max
x_range_dif <- x_range_p10[2] - x_range_p10[1]
### Determine unit of breaks in power of 10
x_unit_p10 <- 0.5
- x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by = x_unit_p10)
+ x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by=x_unit_p10)
n_Ticks <- x_breaks_p10 |> length()
if(n_Ticks>nTicks){
x_unit_p10 <- 1
- x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by = x_unit_p10)
+ x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by=x_unit_p10)
n_Ticks <- x_breaks_p10 |> length()
if(n_Ticks>nTicks){
x_unit_p10 <- 2
- x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by = x_unit_p10)
+ x_breaks_p10 <- seq(x_range_p10[1], x_range_p10[2], by=x_unit_p10)
n_Ticks <- x_breaks_p10 |> length()
- }
- }
- x_breaks <- x_breaks_p10 * 10^x_power10Max
+ } ### End if(n_Ticks>nTicks)
+ } ### End if(n_Ticks>nTicks)
+ x_breaks <- x_breaks_p10 * 10**x_power10Max
# return(x_breaks)
### Create list to return
- return_list <- list(
- breaks = x_breaks,
- limits = df_minMax$value,
- bounds = bounds_rounded,
- power10 = x_power10Max,
- power1000 = x_power1000,
- mod1000 = x_mod1000
- )
+ return_list <- list()
+ return_list[["breaks" ]] <- x_breaks
+ return_list[["limits" ]] <- df_minMax[["value"]]
+ return_list[["bounds" ]] <- bounds_rounded
+ return_list[["power10" ]] <- x_power10Max
+ return_list[["power1000"]] <- x_power1000
+ return_list[["mod1000" ]] <- x_mod1000
+ ### Return
return(return_list)
}
@@ -1137,14 +1528,10 @@ fredi_slrInterp <- function(
data_x,
slr_x, ### slrScenario
groupByCols
- # drivers_x ### driverScenario |> filter(tolower(model_type)=="slr") |> select(-c("model_type"))
- # drivers_x, ### driverScenario |> filter(tolower(model_type)=="slr") |> select(-c("model_type"))
){
-
- #data_x <-df_slrImpacts
- #slr_x = slrScenario
- #groupByCols=slrGroupByCols
names_slr <- data_x |> names(); #names_slr |> print()
+ # byState <- "state" %in% names_slr
+ # if(byState){stateCols0 <- c("state", "postal")} else{stateCols0 <- c()}
### Summary columns
slrSumCols <- c("scaled_impacts")
n_slrSumCols <- slrSumCols |> length()
@@ -1155,120 +1542,129 @@ fredi_slrInterp <- function(
### Info names
### "year", "driverValue", "lower_model" , "upper_model", "lower_slr" , "upper_slr"
data_xAdj <- slr_x; rm("slr_x")
- names_slrAdj <- data_xAdj |> names(); #names_slrAdj |> print()
- other_slrCols <- names_slrAdj[which(names_slrAdj!="year")]
+ names_slrAdj <- data_xAdj |> names()
+ #names_slrAdj |> print(); other_slrCols |> print(); join_slrCols |> print()
+ other_slrCols <- names_slrAdj |> (function(x){x[!(x %in% c("year"))]})()
join_slrCols <- c(groupByCols, "year") ### sectorprimary, includeaggregate
- join_cols0 <- c("driverValue", "year")
### Other columns
dropCols0 <- c("model", "model_dot")
otherCols0 <- c("modelType", "denom_slr", "numer_slr", "adj_slr")
- joinCols1 <- join_slrCols[!(join_slrCols %in% dropCols0)] |>
- c(bounds0 |> paste("model", sep="_")) |>
- c(bounds0 |> paste("slr", sep="_")) |>
- c(otherCols0)
+ joinCols1 <- join_slrCols |> (function(x){x[!(x %in% dropCols0)]})()
+ joinCols1 <- joinCols1 |> c(bounds0 |> paste0("_", "model"))
+ joinCols1 <- joinCols1 |> c(bounds0 |> paste0("_", "slr" ))
+ joinCols1 <- joinCols1 |> c(otherCols0)
### Format values
- data_xAdj <- data_xAdj |> mutate_at(.vars=c(all_of(slrMutCols)), as.character)
+ data_xAdj <- data_xAdj |> mutate_at(c(slrMutCols), as.character)
### Join with slrInfo and convert columns to character
+ # join_cols0 |> print(); data_x |> glimpse(); data_xAdj |> glimpse()
+ # join0 <- c("driverValue", "year")
+ # data_xAdj |> glimpse(); data_x |> glimpse()
+ join0 <- c("year")
data_xAdj <- data_xAdj |> mutate(equal_models = lower_model == upper_model)
- data_x <- data_x |> left_join(data_xAdj, by=all_of(join_cols0))
+ data_x <- data_x |> left_join(data_xAdj, by=join0)
+ rm(join0)
# data_x |> filter(!is.na(scaled_impacts)) |> nrow() |> print()
- rm("data_xAdj")
+ rm(data_xAdj)
### Filter to conditions
data_xEqual <- data_x |> filter( equal_models) |> select(-c("equal_models"));
data_xOther <- data_x |> filter(!equal_models) |> select(-c("equal_models"));
- # data_x |> names() |> print()
- # data_x$model |> unique() |> print()
- # data_x$model_dot |> unique() |> print()
- # data_x$lower_model |> unique() |> print()
- # data_x$upper_model |> unique() |> print()
- # c(nrow(data_xEqual), nrow(data_xOther)) |> print()
- rm("data_x")
+ rm(data_x)
+ ### Which types to calculate
+ hasEqual <- data_xEqual |> nrow()
+ hasOther <- data_xOther |> nrow()
### Process observations that are equal
- if(nrow(data_xEqual)){
+ if(hasEqual){
### Filter observations that are zeros only and make the summary column values zero
- data_xEqual0 <- data_xEqual |> filter(lower_model=="0cm") |> filter(model_dot=="30cm")
- data_xEqual1 <- data_xEqual |> filter(lower_model!="0cm") |> filter(model_dot==lower_model)
+ data_xEqual0 <- data_xEqual |> filter(lower_model=="0cm") |> filter(model_dot=="30cm")
+ data_xEqual1 <- data_xEqual |> filter(lower_model!="0cm") |> filter(model_dot==lower_model)
# c(nrow(data_xEqual0), nrow(data_xEqual1)) |> print()
- rm("data_xEqual")
+ rm(data_xEqual)
### For observations that are zeros only and make the summary column values zero
- data_xEqual0 <- data_xEqual0 |> mutate_at(.vars=c(all_of(slrSumCols)), function(y){0})
+ mutate0 <- slrSumCols
+ data_xEqual0 <- data_xEqual0 |> mutate_at(c(mutate0), function(y){0})
+ rm(mutate0)
### Bind values back together
- data_xEqual <- data_xEqual0 |> rbind(data_xEqual1)
- rm("data_xEqual0", "data_xEqual1")
- ### Rename the model_dot, select appropriate columns
- data_xEqual <- data_xEqual |> mutate(model_dot="Interpolation")
- # data_xEqual |> names|> print()
- data_xEqual <- data_xEqual |> select(c(all_of(names_slr))) |> select(-c(all_of(dropCols0)))
+ data_xEqual <- data_xEqual0 |> rbind(data_xEqual1)
+ rm(data_xEqual0, data_xEqual1)
+ ### Mutate model_dot
+ data_xEqual <- data_xEqual |> mutate(model_dot="Interpolation")
+ ### Select appropriate columns
+ # data_xEqual |> glimpse()
+ select0 <- names_slr
+ drop0 <- dropCols0
+ data_xEqual <- data_xEqual |> select(all_of(select0))
+ data_xEqual <- data_xEqual |> select(-all_of(drop0 ))
+ rm(select0, drop0)
} ### End if length(which_equal) > 0
### Observations that are greater than zero
- if(nrow(data_xOther)){
+ if(hasOther){
### Lower and upper column names and new names
# slrSumCols |> print()
- lowerSumCols <- slrSumCols |> paste("lower", sep="_")
- upperSumCols <- slrSumCols |> paste("upper", sep="_")
+ lowerSumCols <- slrSumCols |> paste0("_", "lower")
+ upperSumCols <- slrSumCols |> paste0("_", "upper")
### Filter lower model_dot observations to those with a lower model_dot value == "0 cm" and others and drop model_dot column
data_xLower0 <- data_xOther |> filter(lower_model=="0cm") #|> mutate(lower_model = "30cm")
data_xLower1 <- data_xOther |> filter(lower_model!="0cm")
- # data_xLower <- data_xLower0 |> rbind(data_xLower1) |>
# rm("data_xLower0", "data_xLower1")
data_xUpper <- data_xOther |> filter(model_dot==upper_model)
- rm("data_xOther")
+ # data_xOther |> glimpse(); #data_xLower0 |> glimpse(); data_xUpper |> glimpse()
+ rm(data_xOther)
### Rename columns
- data_xLower0 <- data_xLower0 |> rename_with(~lowerSumCols[which(slrSumCols==.x)], .cols=slrSumCols)
- data_xLower1 <- data_xLower1 |> rename_with(~lowerSumCols[which(slrSumCols==.x)], .cols=slrSumCols)
- data_xUpper <- data_xUpper |> rename_with(~upperSumCols[which(slrSumCols==.x)], .cols=slrSumCols)
+ # # data_xLower0 <- data_xLower0 |> rename_with(slrSumCols, ~lowerSumCols[slrSumCols==.x])
+ # # data_xLower1 <- data_xLower1 |> rename_with(slrSumCols, ~lowerSumCols[slrSumCols==.x])
+ # # data_xUpper <- data_xUpper |> rename_with(slrSumCols, ~upperSumCols[slrSumCols==.x])
+ data_xLower0 <- data_xLower0 |> rename_at(c(slrSumCols), ~lowerSumCols)
+ data_xLower1 <- data_xLower1 |> rename_at(c(slrSumCols), ~lowerSumCols)
+ data_xUpper <- data_xUpper |> rename_at(c(slrSumCols), ~upperSumCols)
+ # data_xLower0 |> glimpse(); data_xUpper |> glimpse()
+ # rm(rename0, rename1, rename2)
# rm("lowerSumCols", "upperSumCols")
### Convert values for observations with a lower model_dot value =="0 cm" to zero then filter to lower models
- data_xLower0 <- data_xLower0 |> mutate_at(.vars=c(all_of(lowerSumCols)), function(y){0})
+ data_xLower0 <- data_xLower0 |> mutate_at(c(lowerSumCols), function(y){0})
data_xLower0 <- data_xLower0 |> filter(model_dot=="30 cm")
data_xLower1 <- data_xLower1 |> filter(model_dot==lower_model)
data_xLower <- data_xLower0 |> rbind(data_xLower1)
- rm("data_xLower0", "data_xLower1")
+ rm(data_xLower0, data_xLower1)
### Drop columns
- # namesLower <- data_xLower |> names(); namesUpper <- data_xUpper |> names()
- # namesLower |> print(); namesUpper |> print()
- # dropCols0[!(dropCols0 %in% namesLower)] |> print(); dropCols0[!(dropCols0 %in% namesUpper)] |> print()
+ data_xLower <- data_xLower |> select(-all_of(dropCols0))
+ data_xUpper <- data_xUpper |> select(-all_of(dropCols0))
- data_xLower <- data_xLower |> select(-c(all_of(dropCols0)))
- data_xUpper <- data_xUpper |> select(-c(all_of(dropCols0)))
### Join upper and lower data frames
- # data_xLower |> names() |> print(); data_xUpper |> names() |> print()
- # joinCols0 |> print()
- # namesLower[!(namesLower %in% namesUpper)] |> print(); namesUpper[!(namesUpper %in% namesLower)] |> print()
- data_xOther <- data_xLower |> left_join(data_xUpper, by = c(all_of(joinCols1)))
+ join0 <- data_xLower |> names() |> (function(x){x[!(x %in% c(lowerSumCols))]})()
+ # data_xLower |> glimpse(); data_xUpper |> glimpse(); join0 |> print()
+ data_xOther <- data_xLower |> left_join(data_xUpper, by = c(join0))
rm("data_xLower", "data_xUpper")
### Calculate the new value
# data_xOther |> names() |> print()
slrLowerVals <- data_xOther[, lowerSumCols]
slrUpperVals <- data_xOther[, upperSumCols]
- # slrOtherAdj <- data_xOther[, "adj_slr"] |> as.vector()
- slrOtherAdj <- data_xOther |> get_vector(column = "adj_slr")
+ slrOtherAdj <- data_xOther[["adj_slr"]]
slrNewFactors <- (slrUpperVals - slrLowerVals) * (1 - slrOtherAdj)
slrNewValues <- slrLowerVals + slrNewFactors
data_xOther[,slrSumCols] <- slrNewValues
rm("slrLowerVals", "slrUpperVals", "slrOtherAdj", "slrNewFactors", "slrNewValues")
rm("lowerSumCols", "upperSumCols")
- ### When finished, drop columns and mutate model_dot column
- # data_xOther |> names() |> print(); names_slr |> print()
- names_slr0 <- names_slr[!(names_slr %in% dropCols0)]
- # data_xOther <- data_xOther |> mutate(model_dot="Interpolation")
- data_xOther <- data_xOther |> select(c(all_of(names_slr0)))
+ ### When finished, drop columns and mutate model_dot column
+ # names_slr |> print(); data_xOther |> glimpse()
+ data_xOther <- data_xOther |> select( any_of(names_slr))
+ data_xOther <- data_xOther |> select(-any_of(dropCols0))
+ # data_xOther |> glimpse()
} ### End if (nrow(data_xOther) > 0)
### Bind SLR averages together
- # c(nrow(data_xEqual), nrow(data_xOther)) |> print()
- # data_xEqual |> names() |> print(); data_xOther |> names() |> print()
+ # data_xEqual |> glimpse(); data_xOther |> glimpse()
data_x <- data_xEqual |> rbind(data_xOther)
- # data_x |> nrow() |> print()
- rm("data_xEqual", "data_xOther")
+ rm(data_xEqual, data_xOther)
return(data_x)
}
+
+
diff --git a/FrEDI/R/utils_create_report_figures.R b/FrEDI/R/utils_create_report_figures.R
index 22556145..f94462e2 100644
--- a/FrEDI/R/utils_create_report_figures.R
+++ b/FrEDI/R/utils_create_report_figures.R
@@ -1,46 +1,3 @@
-### Add names to list object
-addListNames <- function(
- list0, ### List object
- names0 ### Names to give to list or data frame
-){
- names(list0) <- names0
- return(list0)
-} ### End addListNames
-
-### This function makes it easier to get data objects from the sysdata.rda file
-get_ciraDataObj <- function(
- x, ### Object name
- listall = FALSE,
- listName = "rDataList",
- pkg = "FrEDI",
- lib.loc = .libPaths()[1] ### Library path to look for packages
- ){
- ### Messaging
- msg0 <- "\t"
- ### Check if list name exists
- exists0 <- listName |> exists()
- ### If the listname exists in the name space, parse it
- ### Otherwise, grab it from a package name space
- if(exists0){new_x <- parse(text=listName) |> eval()}
- else {
- ### Check if package & list name
- pkgList0 <- lib.loc |> installed.packages()
- pkgExists0 <- pkg %in% pkgList0
- if(!pkgExists0){
- msg0 |> paste0("Package doesn't exist...") |> message()
- msg0 |> paste0("Exiting...") |> message()
- return()
- } ### End if(!pkgExists0)
- else {new_x <- getFromNamespace(listName, ns=pkg)}
- } ### End else(exists0)
-
- ### Whether to list all items in data object or not
- if(listall) {return_x <- new_x |> names()}
- else {return_x <- new_x[[x]]}
- ### Return
- return(return_x)
-} ### End get_ciraDataObj
-
### Get column values from a tibble
get_column_values <- function(
df0, ### Tibble
@@ -127,18 +84,25 @@ filter_years <- function(
df0 <- df0 |> filter(year %in% years)
### Return
return(df0)
-} ### End filter_years
+} ### End filter_years()
### Filter values
### Format values to specific number of decimal places
format_values <- function(
df0, ### data
- cols0 = c("driverValue", "gdp_usd", "national_pop", "gdp_percap", "reg_pop", "annual_impacts"), ### Columns to format
+ byState = TRUE,
+ # cols0 = c("driverValue", "gdp_usd", "national_pop", "gdp_percap", "reg_pop", "annual_impacts"), ### Columns to format
digits = 16
){
- df0 <- df0 |> mutate_at(.vars=c(cols0), function(x){format(x, digits=digits)})
+ ### Pop columns
+ if(byState){popCols <- c("state", "postal")} else{popCols <- c()}
+ popCol <- byState |> ifelse("state_pop", "reg_pop")
+ ### Columns
+ cols0 <- c("driverValue", "gdp_usd", "national_pop", "gdp_percap", popCol, "annual_impacts")
+ ### Mutate
+ df0 <- df0 |> mutate_at(vars(cols0), function(x){format(x, digits=digits)})
return(df0)
-}
+} ### End format_values()
### Run CONUS scenarios
create_constant_temp_scenario <- function(
@@ -189,7 +153,8 @@ create_constant_temp_scenario <- function(
#### Get scenario inputs
#### Get inputs list for a single scenario
get_scenario_inputsList <- function(
- df0 ### Data
+ df0, ### Data
+ byState = TRUE
){
### df0 names
names0 <- df0 |> names()
@@ -199,12 +164,15 @@ get_scenario_inputsList <- function(
slr0 <- NULL
gdp0 <- NULL
pop0 <- NULL
+ ### Pop columns
+ if(byState){popCols <- c("state", "postal")} else{popCols <- c()}
+ popCol <- byState |> ifelse("state_pop", "reg_pop")
### Columns for scenarios
cTemp0 <- c("year", "temp_C_conus")
cTemp1 <- c("year", "temp_C")
cSlr <- c("year", "slr_cm")
cGdp <- c("year", "gdp_usd")
- cPop <- c("year", "region", "reg_pop")
+ cPop <- c("year", "region") |> c(popCols, popCol)
### Whether to create scenarios
doTemp0 <- (cTemp0 %in% names0) |> all()
doTemp1 <- (cTemp1 %in% names0) |> all()
@@ -217,28 +185,28 @@ get_scenario_inputsList <- function(
if(doTemp){
if(doTemp0){cTemp <- cTemp0}
else {cTemp <- cTemp1}
- temp0 <- df0 |> select(c(all_of(cTemp)))
+ temp0 <- df0 |> select(all_of(cTemp))
if(doTemp0){
- temp0 <- temp0 |> rename_at(.vars=c("temp_C_conus"), ~c("temp_C"))
- }
+ temp0 <- temp0 |> rename_at(vars("temp_C_conus"), ~c("temp_C"))
+ } ### End if(doTemp0)
list0[["tempInput"]] <- temp0
rm("temp0")
} ### End if(doTemp)
if(doSlr){
- slr0 <- df0 |> select(c(all_of(cSlr)))
+ slr0 <- df0 |> select(all_of(cSlr))
list0[["slrInput"]] <- slr0
rm("slr0")
} ### End if(doSlr)
if(doGdp){
- gdp0 <- df0 |> select(c(all_of(cGdp)))
+ gdp0 <- df0 |> select(all_of(cGdp))
list0[["gdpInput"]] <- gdp0
rm("gdp0")
} ### End if(doGdp)
if(doPop){
- pop0 <- df0 |> select(c(all_of(cPop)))
+ pop0 <- df0 |> select(all_of(cPop))
list0[["popInput"]] <- pop0
rm("pop0")
} ### End if(doPop)
@@ -275,11 +243,20 @@ agg_fredi_scenario <- function(
joinCols = c("year"),
aggLevels = c("modelaverage", "national")
){
+ ### Pop cols
+ byState <- "state" %in% (df0 |> names())
+ if(byState){stateCols <- c("state", "postal")} else{stateCols <- c()}
+ popCol <- byState |> ifelse("state_pop", "reg_pop")
### Filter to grouping columns
drop0 <- scenCols[!(scenCols %in% joinCols)]
### Run FrEDI
- group0 <- c("sector", "variant", "impactYear", "impactType", "model_type", "model", "region") |> c(drop0)
+ group0 <- c("sector", "variant", "impactType", "impactYear")
+ group0 <- group0 |> c("region", stateCols)
+ group0 <- group0 |> c("model_type", "model")
+ group0 <- group0 |> c("sectorprimary", "includeaggregate")
+ group0 <- group0 |> c(drop0)
df0 <- df0 |> FrEDI::aggregate_impacts(aggLevels = aggLevels, groupByCols = group0)
+ # df0 <- df0 |> FrEDI::aggregate_impacts(aggLevels = aggLevels)
### Return
return(df0)
} ### End agg_fredi_scenario
@@ -301,22 +278,30 @@ run_scenario <- function(
rm("df0")
### Run FrEDI
if(fredi){
- df_x0 <- df_x0 |> run_fredi_scenario(
+ df_x0 <- df_x0 |> run_fredi_scenario(
sectors = sectors,
scenCols = scenCols,
joinCols = joinCols
) ### End run_fredi_scenario
} ### End if(fredi)
+ # "got here1" |> print(); df_x0 |> glimpse()
+
### Aggregate FrEDI
agg0 <- !("none" %in% aggLevels)
# agg0 |> print()
if(agg0){
- df_x0 <- df_x0 |> agg_fredi_scenario(
+ # "got here1" |> print()
+ df_x0 <- df_x0 |> agg_fredi_scenario(
scenCols = scenCols,
joinCols = joinCols,
aggLevels = aggLevels
) ### End run_fredi_scenario
} ### End if(agg0)
+ # "got here2" |> print(); df_x0 |> glimpse()
+
+ ### Format other values
+ mutate0 <- c("temp_C_conus", "temp_C_global", "slr_cm")
+ df_x0 <- df_x0 |> mutate_at(vars(mutate0), as.numeric)
### Return
return(df_x0)
@@ -352,7 +337,8 @@ run_scenarios <- function(
return(df_x)
}) ### End function(.x), walk
### Bind values into a list
- df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ # df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ df0 <- list0 |> bind_rows()
### Return
return(df0)
@@ -407,7 +393,8 @@ sum_impacts_byDoW <- function(
return(df_z)
})
### Bind together
- df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ # df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ df0 <- list0 |> bind_rows()
rm(list0)
### Adjust values
df0[[adjCol]] <- df0[["annual_impacts"]] * adjVal
@@ -468,7 +455,8 @@ sum_impacts_byDoW_years <- function(
return(df_x)
}) ### End walk
### Bind values together
- df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ # df0 <- list0 %>% (function(x){do.call(rbind, x)})
+ df0 <- list0 |> bind_rows()
rm(list0)
### Convert to tibble
df0 <- df0 |> as_tibble()
@@ -488,19 +476,19 @@ get_fig7_slrDataObj <- function(
### Sector Info
### Variant Info
### Model Info
- dfSectors <- "co_sectors" |> get_ciraDataObj()
- dfVariant <- "co_variants" |> get_ciraDataObj()
- slrRef <- "co_models" |> get_ciraDataObj()
+ dfSectors <- "co_sectors" |> get_frediDataObj()
+ dfVariant <- "co_variants" |> get_frediDataObj()
+ slrRef <- "co_models" |> get_frediDataObj()
### SLR Driver values
### SLR Scaled impct values
- if(drivers){slrCm <- "slr_cm" |> get_ciraDataObj()}
- if(impacts){slrImp <- "slrImpacts" |> get_ciraDataObj()}
+ if(drivers){slrCm <- "slr_cm" |> get_frediDataObj()}
+ if(impacts){slrImp <- "slrImpacts" |> get_frediDataObj(listSub="stateData")}
###### SLR Models ######
### Format SLR Models
slrRef <- slrRef |> filter(modelType=="slr")
- slrRef <- slrRef |> rename_at(.vars=c("model_label"), ~c("model"))
+ slrRef <- slrRef |> rename_at(vars("model_label"), ~c("model"))
###### Levels & Labels ######
### Initial levels & labels
@@ -513,7 +501,7 @@ get_fig7_slrDataObj <- function(
# slrLevels |> print(); slrLabels |> print()
### Vector of model labels and number of models
c_slrs <- slrLabels
- n_slrs <- c_slrs %>% length
+ n_slrs <- c_slrs |> length()
###### Sectors Data ######
### Format Sectors data
@@ -845,17 +833,19 @@ plot_DoW <- function(
df_types <- tibble()
if(do_gcm){
df_gcm <- "GCM" %>%
- map(function(.x){tibble(type=.x, year=years0, label=.x |> paste0("_", years0))}) %>%
- (function(y){do.call(rbind, y)})
+ # map(function(.x){tibble(type=.x, year=years0, label=.x |> paste0("_", years0))}) %>%
+ # (function(y){do.call(rbind, y)})
+ map(function(.x){tibble(type=.x, year=years0, label=.x |> paste0("_", years0))}) |>
+ bind_rows()
df_types <- df_types |> rbind(df_gcm)
rm(df_gcm)
- }
+ } ### if(do_gcm)
### SLR data
if(do_slr){
df_slr <- tibble(type="SLR", year="all", label="SLR" |> paste0("_", "all"))
df_types <- df_types |> rbind(df_slr)
rm(df_slr)
- }
+ } ### if(do_slr)
# "got here" |> print()
# df_types |> glimpse()
@@ -864,7 +854,7 @@ plot_DoW <- function(
### Initialize list
list0 <- pList0 %>% pmap(function(x1, x2){
x1 |> paste0("_", x2) |> print()
- plot_y <- plot_DoW_by_modelYear(
+ plot_y <- plot_DoW_by_modelYear(
df0 = df0, ### Data (e.g., output from sum_impactsByDegree)
type0 = x1, ### Model type: GCM or SLR
year0 = x2,
@@ -884,7 +874,7 @@ plot_DoW <- function(
### Add list names
# list0 |> print()
labels0 <- df_types[["label"]]
- list0 <- list0 |> addListNames(labels0)
+ list0 <- list0 |> set_names(labels0)
### Return
return(list0)
@@ -922,10 +912,12 @@ plot_DoW_by_sector <- function(
df1 <- df0 |> filter(model_type=="GCM")
sectors0 <- df1[["sector"]] |> unique()
df_x <- sectors0 |> map(function(.y){tibble(type=.x, sector=.y, year=years, label=.y |> paste0("_", years))})
- df_x <- df_x %>% (function(y){do.call(rbind, y)})
+ # df_x <- df_x %>% (function(y){do.call(rbind, y)})
+ df_x <- df_x |> bind_rows()
return(df_x)
})
- df_gcm <- df_gcm %>% (function(y){do.call(rbind, y)})
+ # df_gcm <- df_gcm %>% (function(y){do.call(rbind, y)})
+ df_gcm <- df_gcm |> bind_rows()
df_types <- df_types |> rbind(df_gcm)
rm(df_gcm)
} ### End if(do_gcm)
@@ -972,12 +964,12 @@ plot_DoW_by_sector <- function(
})
### Add names
labels_x <- types_x[["label"]]
- list_x <- list_x |> addListNames(labels_x)
+ list_x <- list_x |> set_names(labels_x)
### Return
return(list_x)
})
### Add names
- list0 <- list0 |> addListNames(models)
+ list0 <- list0 |> set_names(models)
### Return
return(list0)
} ### End plot_DoW_by_sector
@@ -1010,10 +1002,10 @@ plot_slr_scenarios <- function(
scale_x_continuous("Year") +
scale_y_continuous("GMSL (cm)")
- plot0 <- plot0 +
- theme(panel.background = element_rect(fill="white")) +
- theme(panel.grid = element_line(color="lightgrey")) +
- theme(axis.line = element_line(color="darkgrey"))
+ # plot0 <- plot0 +
+ # theme(panel.background = element_rect(fill="white")) +
+ # theme(panel.grid = element_line(color="lightgrey")) +
+ # theme(axis.line = element_line(color="darkgrey"))
plot0 <- plot0 +
ggtitle(title0, subTitle0) +
diff --git a/FrEDI/R/utils_import_inputs.R b/FrEDI/R/utils_import_inputs.R
index b764d245..8de0e0e8 100644
--- a/FrEDI/R/utils_import_inputs.R
+++ b/FrEDI/R/utils_import_inputs.R
@@ -23,7 +23,7 @@ fun_tryInput <- function(
fileExists <- filename |> file.exists()
### If the file exists, try loading the file and then check the class of the result
if(fileExists){
- fileInput <- try(filename |> read.csv(), silent=T)
+ fileInput <- try(filename |> read.csv(check.names = F), silent=T)
classInput <- fileInput |> class()
inputExists <- ("data.frame" %in% classInput)
diff --git a/FrEDI/R/utils_plot_DOW_byImpactType.R b/FrEDI/R/utils_plot_DOW_byImpactType.R
index c73af167..82ec1982 100644
--- a/FrEDI/R/utils_plot_DOW_byImpactType.R
+++ b/FrEDI/R/utils_plot_DOW_byImpactType.R
@@ -157,39 +157,46 @@ plot_DOW_byImpactType <- function(
# subtitle0 <- variant0
###### Create the plot ######
- plot0 <- df0 |> ggplot(aes(x=.data[[xCol]], y=.data[[yCol]]))
+ plot0 <- df0 |> ggplot(aes(x=.data[[xCol]], y=.data[[yCol]]))
### Add Geoms
- plot0 <- plot0 + geom_line (aes(color = model))
+ plot0 <- plot0 + geom_line (aes(color = model))
if(do_slr){df_points0 <- df0 |> filter(year %in% x_breaks)}
else {df_points0 <- df0}
- plot0 <- plot0 + geom_point(data=df_points0, aes(color = model, shape=model))
+ plot0 <- plot0 + geom_point(data=df_points0, aes(color = model, shape=model))
### Add Scales
- plot0 <- plot0 + scale_color_discrete(lgdLbl)
- plot0 <- plot0 + scale_shape_discrete(lgdLbl)
+ # plot0 <- plot0 + scale_shape_discrete(lgdTitle0)
+ shapeLvls <- df0[["model"]] |> unique() |> sort()
+ numShapes <- shapeLvls |> length()
+ shapeVals <- c(1:numShapes)
+ # shapeLvls |> print()
+ # plot0 <- plot0 + scale_shape_discrete(lgdLbl)
+ plot0 <- plot0 + scale_shape_manual(lgdLbl, breaks=shapeLvls, values=shapeVals)
+ plot0 <- plot0 + scale_color_discrete(lgdLbl)
+ # plot0 <- plot0 + scale_shape_discrete(lgdLbl)
###### Adjust legend title ######
if(hasLgdPos){plot0 <- plot0 + guides(color = guide_legend(title.position = lgdPos))}
###### Add themes and title ######
- plot0 <- plot0 + ggtitle(title0, subtitle0)
+ plot0 <- plot0 + ggtitle(title0, subtitle0)
###### Add scales ######
- plot0 <- plot0 + scale_x_continuous(xTitle, breaks = x_breaks, limits = x_limits)
- # plot0 <- plot0 + scale_y_continuous(yTitle)
- plot0 <- plot0 + scale_y_continuous(y_label)
+ plot0 <- plot0 + scale_x_continuous(xTitle, breaks = x_breaks, limits = x_limits)
+ # plot0 <- plot0 + scale_y_continuous(yTitle)
+ plot0 <- plot0 + scale_y_continuous(y_label)
###### Adjust Appearance ######
- # plot0 <- plot0 + theme(panel.background = element_rect(fill="white"))
- # plot0 <- plot0 + theme(panel.grid = element_line(color="lightgrey"))
- # plot0 <- plot0 + theme(plot.title = element_text(hjust = 0.5, size=11))
- plot0 <- plot0 + theme(plot.title = element_text(hjust = 0.5, size=11))
- plot0 <- plot0 + theme(plot.subtitle = element_text(hjust = 0.5, size=10))
- # plot0 <- plot0 + theme(axis.title.x = element_text(hjust = 0.5, size=9, color="white"))
- plot0 <- plot0 + theme(axis.title.x = element_text(hjust = 0.5, size=9))
- plot0 <- plot0 + theme(axis.title.y = element_text(hjust = 0.5, size=9))
- plot0 <- plot0 + theme(legend.position = "bottom")
+ # plot0 <- plot0 + theme(panel.background = element_rect(fill="white"))
+ # plot0 <- plot0 + theme(panel.grid = element_line(color="lightgrey"))
+ # plot0 <- plot0 + theme(plot.title = element_text(hjust = 0.5, size=11))
+ plot0 <- plot0 + theme(plot.title = element_text(hjust = 0.5, size=11))
+ plot0 <- plot0 + theme(plot.subtitle = element_text(hjust = 0.5, size=10))
+ # plot0 <- plot0 + theme(axis.title.x = element_text(hjust = 0.5, size=9, color="white"))
+ plot0 <- plot0 + theme(axis.title.x = element_text(hjust = 0.5, size=9))
+ plot0 <- plot0 + theme(axis.title.y = element_text(hjust = 0.5, size=9))
+ plot0 <- plot0 + theme(legend.position = "bottom")
###### Plot Index #####
###### If plotIndex, remove some plot elements
diff --git a/FrEDI/R/utils_plot_DOW_byImpactTypes.R b/FrEDI/R/utils_plot_DOW_byImpactTypes.R
index 6b35034e..ca384930 100644
--- a/FrEDI/R/utils_plot_DOW_byImpactTypes.R
+++ b/FrEDI/R/utils_plot_DOW_byImpactTypes.R
@@ -220,7 +220,7 @@ plot_DOW_byImpactTypes <- function(
})
### Name the plots
- listVars_j <- listVars_j |> addListNames(c_variants)
+ listVars_j <- listVars_j |> set_names(c_variants)
# return(listVars_j)
# "got here1..." |> print()
@@ -245,7 +245,7 @@ plot_DOW_byImpactTypes <- function(
### Name the plots
# listTypes_i |> length() |> print(); c_impTypes |> print()
- listTypes_i <- listTypes_i |> addListNames(c_impTypes)
+ listTypes_i <- listTypes_i |> set_names(c_impTypes)
# "got here3..." |> print()
# return(listTypes_i)
@@ -284,7 +284,7 @@ plot_DOW_byImpactTypes <- function(
return(plotGrid_i)
})
### Name the plots
- listYears0 <- listYears0 |> addListNames(c_impYears)
+ listYears0 <- listYears0 |> set_names(c_impYears)
###### Return ######
### Return the plot
diff --git a/FrEDI/R/utils_plot_DOW_byModelType.R b/FrEDI/R/utils_plot_DOW_byModelType.R
index 014e674d..16bfa608 100644
--- a/FrEDI/R/utils_plot_DOW_byModelType.R
+++ b/FrEDI/R/utils_plot_DOW_byModelType.R
@@ -143,7 +143,7 @@ plot_DOW_byModelType <- function(
### Add list names
# # x |> length() |> print()
- plotList_x <- plotList_x |> addListNames(c_sectors)
+ plotList_x <- plotList_x |> set_names(c_sectors)
###### Get Reference Plot ######
refPlot_x <- c_sectors[1] |> plot_DOW_bySector(
diff --git a/FrEDI/R/utils_plot_DOW_bySector.R b/FrEDI/R/utils_plot_DOW_bySector.R
index 3f3ff6ae..7b22e64f 100644
--- a/FrEDI/R/utils_plot_DOW_bySector.R
+++ b/FrEDI/R/utils_plot_DOW_bySector.R
@@ -84,17 +84,25 @@ plot_DOW_bySector <- function(
if(!hasMUnits){mUnit0 <- "cm"}
###### Create the plot ######
- # df0 %>% names() %>% print()
- # df0 %>% glimpse()
+ # df0 |> names() |> print()
+ # df0 |> glimpse()
plot0 <- df0 |> ggplot(aes(x=.data[[xCol]], y=.data[[yCol]]))
+ # plot0 <- df0 |> ggplot(aes(x=.data[[xCol]], y=.data[[yCol]], group=interaction(sector, model)))
### Add Geoms
plot0 <- plot0 + geom_line (aes(color = model))
+ # plot0 <- plot0 + geom_point(aes(color = model))
plot0 <- plot0 + geom_point(aes(color = model, shape=model))
### Add Scales
plot0 <- plot0 + scale_color_discrete(lgdTitle0)
- plot0 <- plot0 + scale_shape_discrete(lgdTitle0)
+ # plot0 <- plot0 + scale_shape_discrete(lgdTitle0)
+ shapeLvls <- df0[["model"]] |> unique() |> sort()
+ numShapes <- shapeLvls |> length()
+ shapeVals <- c(1:numShapes)
+ # shapeLvls |> print()
+ # plot0 <- plot0 + scale_shape_discrete(lgdTitle0)
+ plot0 <- plot0 + scale_shape_manual(lgdTitle0, breaks=shapeLvls, values=shapeVals)
plot0 <- plot0 + scale_x_continuous(xTitle0, limits = x_limits, breaks = x_breaks)
plot0 <- plot0 + scale_y_continuous(yTitle0, limits = y_limits, breaks = y_breaks)
diff --git a/FrEDI/R/utils_sv.R b/FrEDI/R/utils_sv.R
index 900d413d..4456a9e5 100644
--- a/FrEDI/R/utils_sv.R
+++ b/FrEDI/R/utils_sv.R
@@ -177,7 +177,7 @@ calc_tractScaledImpacts <- function(
### Names of functions
c_tracts <- funList |> names()
years_x <- driverValues$year |> as.vector()
- values_x <- driverValues[,xCol] |> as.vector()
+ values_x <- driverValues[,xCol] |> pull() |> as.vector()
funcs_x <- funList |> names()
# # c_tracts <- c_tracts[1:1e3]; funcs_x <- funcs_x[1:1e3]
# c_tracts <- c(29031880500);
diff --git a/FrEDI/scripts/create_DoW_results.R b/FrEDI/scripts/create_DoW_results.R
index 8fe5f851..656902b2 100644
--- a/FrEDI/scripts/create_DoW_results.R
+++ b/FrEDI/scripts/create_DoW_results.R
@@ -9,18 +9,32 @@ require(ggpubr)
###### create_DoW_results ######
create_DoW_results <- function(
sectors = FrEDI::get_sectorInfo(), ### Which sectors
- gcmYears = c(2090), ### Which years to report on for GCM sectors
+ gcmYears = c(2090), ### Which years to report on for GCM sectors
slrYears = c(2050, 2090), ### Which years to report on for SLR sectors
- silent = TRUE, ### Degree of messaging
- testing = FALSE, ### Whether to print out extra diagnostic values
- aggOnly = TRUE, ### Whether to only include sectors for which "includeaggregate==1" in Fig 7 plots
+ byState = TRUE, ### Whether values are by state or just by region
+ silent = TRUE, ### Degree of messaging
+ testing = FALSE, ### Whether to print out extra diagnostic values
+ aggOnly = TRUE, ### Whether to only include sectors for which "includeaggregate==1" in Fig 7 plots
loadCode = "project", ### Whether to load code as source or devtools
- fpath = "." , ### Path to main FrEDI directory to load code from if loadCode == "project" or loadCode == "package"
- saveFile = FALSE, ### Save file
+ fpath = "." , ### Path to main FrEDI directory to load code from if loadCode == "project" or loadCode == "package"
+ saveFile = FALSE, ### Save file
outPath = "." |> file.path("report_figures"), ### Path to save results if saveFile == TRUE
- img_dev = "pdf", ### Image device if saveFile == TRUE
- return = TRUE ### Whether to return list object
+ img_dev = "pdf", ### Image device if saveFile == TRUE
+ return = TRUE ### Whether to return list object
){
+ # sectors = FrEDI::get_sectorInfo() ### Which sectors
+ # gcmYears = c(2090) ### Which years to report on for GCM sectors
+ # slrYears = c(2050, 2090) ### Which years to report on for SLR sectors
+ # silent = TRUE ### Degree of messaging
+ # testing = TRUE ### Whether to print out extra diagnostic values
+ # byState = TRUE ### Whether values are by state or just by region
+ # aggOnly = TRUE ### Whether to only include sectors for which "includeaggregate==1" in Fig 7 plots
+ # loadCode = "project" ### Whether to load code as source or devtools
+ # fpath = "." ### Path to main FrEDI directory to load code from if loadCode == "project" or loadCode == "package"
+ # saveFile = FALSE ### Save file
+ # outPath = "." |> file.path("report_figures") ### Path to save results if saveFile == TRUE
+ # img_dev = "pdf" ### Image device if saveFile == TRUE
+ # return = TRUE ### Whether to return list object
###### Initial values ######
### Messaging
do_msg <- !silent
@@ -100,12 +114,15 @@ create_DoW_results <- function(
###### ** Constants ######
### Numeric columns: Specify so that we can print out the associated data
### Number of digits to format
- c_numVars <- c("driverValue", "gdp_usd", "national_pop", "gdp_percap", "reg_pop", "annual_impacts")
+ c_popCol <- byState |> ifelse("state_pop", "reg_pop")
+ # c_numVars <- c("driverValue", "gdp_usd", "national_pop", "gdp_percap", "reg_pop", "annual_impacts")
+ c_numVars <- c("driverValue", "gdp_usd", "national_pop", "gdp_percap") |> c(c_popCol) |> c("annual_impacts")
### Integer temperatures: data frame of inputs
conusPrefix0 <- "Other_Integer"
globalPrefix0 <- "preI_global"
### Temperatures
- c_conusTemps <- 0:7
+ # c_conusTemps <- 0:7
+ c_conusTemps <- 0:10
c_globalTemps <- c(1.487, 2.198)
### Numbers of scenarios
n_conusTemps <- c_conusTemps |> length()
@@ -136,7 +153,9 @@ create_DoW_results <- function(
x = df_scenarios[["temp_C" ]],
y = df_scenarios[["tempType"]],
z = df_scenarios[["prefix" ]]
- ) |> pmap(function(x, y, z){
+ )
+ ### Create constant temp scenarios
+ inputs_df_int <- inputs_df_int |> pmap(function(x, y, z){
create_constant_temp_scenario(
temp0 = x,
type0 = y,
@@ -162,6 +181,7 @@ create_DoW_results <- function(
scenCols = c("scenario", "year", "temp_C_conus", "temp_C_global", "slr_cm"),
joinCols = c("year")
)
+
### Glimpse results
if(return0) resultsList[["df_int_byType"]] <- df_int_byType
if(testing) df_int_byType |> glimpse()
@@ -182,7 +202,7 @@ create_DoW_results <- function(
###### ** Result Totals ######
if(testing|do_msg) "Aggregating integer scenario results..." |> message()
#### Aggregate Impact Types, Impact Years
- df_int_totals <- df_int_byType %>% run_scenarios(
+ df_int_totals <- df_int_byType |> run_scenarios(
col0 = "scenario",
fredi = FALSE,
aggLevels = c("impactyear", "impacttype"),
@@ -241,15 +261,15 @@ create_DoW_results <- function(
# codePath |> loadCustomFunctions()
if(testing|do_msg) "Plotting GCM results by sector, degree of warming (DOW)..." |> message()
plots_dow_gcm <- sum_gcm_totals |> plot_DoW(
- types0 = c("GCM"), ### Model type: GCM or SLR
- years = gcmYears,
- xCol = "driverValue",
- yCol = "annual_impacts",
- thresh0 = breakChars
- )
+ types0 = c("GCM"), ### Model type: GCM or SLR
+ years = gcmYears,
+ xCol = "driverValue",
+ yCol = "annual_impacts",
+ thresh0 = breakChars
+ )
### Glimpse
+ if(testing) plots_dow_gcm[["GCM_2090"]] |> print()
if(return0) resultsList[["plots_dow_gcm"]] <- plots_dow_gcm
- if(testing) plots_dow_gcm[["GCM_2010"]] |> print()
### Save
# codePath |> loadCustomFunctions()
if(saveFile){
@@ -280,8 +300,8 @@ create_DoW_results <- function(
silent = TRUE
)
### Glimpse
- if(return0) resultsList[["sum_gcm_byType"]] <- sum_gcm_byType
if(testing) sum_gcm_byType |> glimpse()
+ if(return0) resultsList[["sum_gcm_byType"]] <- sum_gcm_byType
### Save summary table
if(saveFile){
if(do_msg) paste0("Saving summary of GCM results by sector, impact type, degree of warming...") |> message()
@@ -294,13 +314,14 @@ create_DoW_results <- function(
if(testing|do_msg) "Plotting GCM results by sector, impact type, degree of warming (DOW)..." |> message()
plots_gcm_byType <- sum_gcm_byType |>
# filter(sector %in% c_sectorNames[c(10)]) |>
+ filter(!(sector %in% c("Roads"))) |>
plot_DoW_by_sector(
models = c("GCM"),
yCol = "annual_impacts"
)
### Glimpse
- if(return0) resultsList[["plots_gcm_byType"]] <- plots_gcm_byType
if(testing) plots_gcm_byType$GCM$`Extreme Temperature_2010`[["2010"]] |> print()
+ if(return0) resultsList[["plots_gcm_byType"]] <- plots_gcm_byType
### Save
if(saveFile){
if(do_msg) paste0("Saving plots of GCM results by sector, impact type, degree of warming...") |> message()
diff --git a/FrEDI/vignettes/articles/Example1.Rmd b/FrEDI/vignettes/articles/Example1.Rmd
index 9197fd94..81d76d38 100644
--- a/FrEDI/vignettes/articles/Example1.Rmd
+++ b/FrEDI/vignettes/articles/Example1.Rmd
@@ -222,10 +222,10 @@ output_df <- run_fredi(inputsList= inputs_list,
# Option: write output
## Write Full Dataframe to CSV (or feather)
-# write.csv(output_df, './output/example_output.csv')
+# write.csv(output_df$results, './output/example_output.csv')
#First five lines of output dataframe
-#output_df[5,]
+#output_df$results[1:5,]
```
diff --git a/FrEDI/vignettes/articles/Example1.html b/FrEDI/vignettes/articles/Example1.html
index a4a94a8e..b614a9d2 100644
--- a/FrEDI/vignettes/articles/Example1.html
+++ b/FrEDI/vignettes/articles/Example1.html
@@ -13,11 +13,24 @@