diff --git a/src/main/resources/config/config-template.conf b/src/main/resources/config/config-template.conf index 97deefbd79..474f75da7c 100644 --- a/src/main/resources/config/config-template.conf +++ b/src/main/resources/config/config-template.conf @@ -24,7 +24,7 @@ BaseRuntimeConfig { #@define extends BaseRuntimeConfig LoadRuntimeConfig { baseRuntimeConfig: BaseRuntimeConfig - modelBehaviour: string # How the model behaves. Possible values: fix, profile, random + modelBehaviour: string # How the model behaves. Possible values: fix, profile, random, markov reference: string # Scaling reference for the load model. Possible values: power, energy } diff --git a/src/main/resources/load/markov/appliances/average_hh.csv b/src/main/resources/load/markov/appliances/average_hh.csv new file mode 100644 index 0000000000..f19dbc330d --- /dev/null +++ b/src/main/resources/load/markov/appliances/average_hh.csv @@ -0,0 +1,2 @@ +washing_machine,dryer,dish_washer,stove,fridge,freezer,television,video_recorder,pc,telecommunication,lighting,water_heating,other_load +0.972,0.394,0.686,0.984,1.219,0.561,1.58,0.9,1.649,2.963,2.5,0.3,1 \ No newline at end of file diff --git a/src/main/resources/load/markov/appliances/by_Type.csv b/src/main/resources/load/markov/appliances/by_Type.csv new file mode 100644 index 0000000000..5115154073 --- /dev/null +++ b/src/main/resources/load/markov/appliances/by_Type.csv @@ -0,0 +1,4 @@ +type,washing_machine,dryer,dish_washer,stove,fridge,freezer,television,video_recorder,pc,telecommunication,lighting,water_heating,other_load +flat,0.926,0.269,0.545,0.94,1.088,0.368,1.354,0.807,1.453,2.535,1.5,0.1,1 +house,1.032,0.561,0.873,1.043,1.393,0.817,1.88,1.023,1.91,3.53,2.5,0.3,1.5 + diff --git a/src/main/resources/load/markov/appliances/by_income.csv b/src/main/resources/load/markov/appliances/by_income.csv new file mode 100644 index 0000000000..7e06dca7a2 --- /dev/null +++ b/src/main/resources/load/markov/appliances/by_income.csv @@ -0,0 +1,10 @@ +income,washing_machine,dryer,dish_washer,stove,fridge,freezer,television,video_recorder,pc,telecommunication,lighting,water_heating,other_load +below 900,0.835,0.154,0.306,0.885,1.024,0.286,1.05,0.559,0.953,1.807,1,0.1,1.3 +from 900 to 1300,0.924,0.219,0.462,0.926,1.059,0.388,1.232,0.637,1.038,2.093,1.2,0.1,1.4 +from 1300 to 1500,0.946,0.269,0.555,0.944,1.099,0.456,1.349,0.721,1.166,2.302,1.8,0.1,1.5 +from 1500 to 2000,0.964,0.33,0.645,0.963,1.14,0.515,1.486,0.83,1.352,2.574,2,0.2,1.6 +from 2000 to 2600,0.996,0.444,0.77,0.998,1.238,0.635,1.665,0.949,1.656,3.082,2.3,0.2,1.8 +from 2600 to 3600,1.02,0.53,0.875,1.03,1.317,0.691,1.871,1.105,2.095,3.644,2.8,0.3,2 +from 3600 to 5000,1.041,0.616,0.954,1.068,1.447,0.751,2.03,1.221,2.499,4.177,3,0.3,2.3 +from 5000 to 18000,1.075,0.694,1.009,1.099,1.59,0.82,2.15,1.335,3.04,4.708,3.2,0.3,2.8 + diff --git a/src/main/resources/load/markov/appliances/by_inhabitants.csv b/src/main/resources/load/markov/appliances/by_inhabitants.csv new file mode 100644 index 0000000000..8bf1eb9c76 --- /dev/null +++ b/src/main/resources/load/markov/appliances/by_inhabitants.csv @@ -0,0 +1,7 @@ +inhabitants,washing_machine,dryer,dish_washer,stove,fridge,freezer,television,video_recorder,pc,telecommunication,lighting,water_heating,other_load +1,0.894,0.223,0.459,0.927,1.055,0.346,1.166,0.645,1.021,1.935,1,0.097,1 +2,1.007,0.431,0.772,1.004,1.282,0.661,1.703,0.923,1.656,3.096,2,0.153,1.5 +3,1.032,0.556,0.894,1.036,1.356,0.711,2.034,1.218,2.451,4.063,2.333,0.208,2 
+4,1.05,0.661,0.961,1.052,1.416,0.796,2.099,1.322,2.743,4.601,2.833,0.25,2.5 +5,1.098,0.732,0.988,1.079,1.494,0.904,2.155,1.362,3.133,5.312,3,0.292,3.5 + diff --git a/src/main/resources/load/markov/appliances/load_ts.csv b/src/main/resources/load/markov/appliances/load_ts.csv new file mode 100644 index 0000000000..812746be11 --- /dev/null +++ b/src/main/resources/load/markov/appliances/load_ts.csv @@ -0,0 +1,27 @@ +washing_machine,dish_washer,dryer,stove,fridge,freezer,television,video_recorder,pc,telecommunication,lighting,water_heating,other_load +100,80,2000,700,125,130,150,30,130,40,60,18000,55 +2000,2000,2000,700,1,1,150,30,130,40,60,0,0 +900,80,2000,700,1,1,150,30,130,40,60,0,0 +100,80,1600,700,125,130,150,30,130,40,60,0,0 +100,80,1300,0,1,1,150,30,130,40,0,0,0 +300,2000,940,0,1,1,150,30,130,40,0,0,0 +50,300,0,0,125,130,0,30,130,40,0,0,0 +0,150,0,0,1,1,0,30,130,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 +0,0,0,0,125,130,0,0,0,40,0,0,0 +0,0,0,0,1,1,0,0,0,40,0,0,0 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/dish_washer.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/dish_washer.csv new file mode 100644 index 0000000000..df55e2fb1c --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/dish_washer.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.001;0;0.014;0.005;0.011;0 +0.002;0;0;0.003;0.022;0.005 +0;0;0;0;0.022;0.018 +0;0;0;0;0;0.009 +0;0;0;0;0;0.004 +0;0;0;0;0;0.004 +0.04;0;0.014;0.003;0.022;0.015 +0.036;0.005;0.005;0.022;0.022;0.005 +0.04;0.015;0.03;0.054;0.033;0.005 +0.02;0.053;0.041;0.049;0.056;0.036 +0.068;0.102;0.082;0.033;0.089;0.091 +0.032;0.097;0.068;0.054;0.089;0.091 +0.04;0.049;0.055;0.049;0.078;0.109 +0.08;0.097;0.082;0.049;0.078;0.054 +0.12;0.146;0.11;0.082;0.112;0.154 +0.08;0.087;0.055;0.109;0.096;0.091 +0.06;0.049;0.055;0.054;0.045;0.073 +0.04;0.049;0.082;0.027;0.067;0.054 +0.06;0.039;0.088;0.049;0.033;0.045 +0.12;0.049;0.041;0.082;0.022;0.036 +0.1;0.063;0.049;0.131;0.033;0.045 +0.04;0.058;0.068;0.082;0.033;0.027 +0.02;0.034;0.041;0.049;0.022;0.018 +0.004;0.01;0.019;0.014;0.011;0.009 \ No newline at end of file diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/dryer.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/dryer.csv new file mode 100644 index 0000000000..97ddf3ecc0 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/dryer.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0.006;0 +0;0;0;0;0.006;0 +0;0;0;0;0;0 +0.011;0;0;0;0;0 +0.034;0;0;0;0.057;0 +0.057;0.048;0;0;0.017;0 +0.091;0.048;0;0.008;0.006;0.004 +0.102;0.024;0.035;0.04;0.029;0.036 +0.091;0.071;0.083;0.032;0.029;0.029 +0.08;0.143;0.139;0.065;0.046;0.128 +0.136;0.119;0.069;0.105;0.069;0.128 +0.091;0.071;0.021;0.242;0.171;0.146 +0.045;0.071;0.007;0.081;0.229;0.073 +0.023;0.024;0.028;0.008;0.057;0.066 
+0.034;0.071;0.139;0.008;0.114;0.066 +0.08;0.024;0.125;0.024;0.103;0.044 +0.057;0.071;0.118;0.032;0.029;0.036 +0.011;0.119;0.083;0.048;0.014;0.026 +0.023;0.048;0.125;0.121;0.009;0.044 +0.023;0.038;0.021;0.105;0.006;0.109 +0.011;0.01;0.007;0.081;0.006;0.066 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/freezer.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/freezer.csv new file mode 100644 index 0000000000..665e3e480e --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/freezer.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.04;0.04;0.04;0.04;0.04;0.04 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.04;0.04;0.04;0.04;0.04;0.04 +0.036;0.036;0.036;0.036;0.036;0.036 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/fridge.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/fridge.csv new file mode 100644 index 0000000000..665e3e480e --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/fridge.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.036;0.036;0.036;0.036;0.036;0.036 +0.04;0.04;0.04;0.04;0.04;0.04 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.045;0.045;0.045;0.045;0.045;0.045 +0.04;0.04;0.04;0.04;0.04;0.04 +0.036;0.036;0.036;0.036;0.036;0.036 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/lighting.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/lighting.csv new file mode 100644 index 0000000000..7148597106 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/lighting.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.018;0.027;0.017;0.014;0.019;0.012 +0.011;0.014;0.009;0.008;0.01;0.007 +0.007;0.008;0.005;0.005;0.006;0.004 +0.005;0.005;0.003;0.004;0.004;0.002 +0.005;0.003;0.002;0.003;0.002;0.001 +0.01;0.003;0.002;0.007;0.002;0.002 
+0.045;0.016;0.01;0.034;0.012;0.007 +0.053;0.04;0.028;0.048;0.035;0.024 +0.038;0.046;0.043;0.054;0.062;0.057 +0.026;0.035;0.04;0.044;0.056;0.064 +0.019;0.026;0.035;0.027;0.036;0.048 +0.018;0.024;0.033;0.026;0.033;0.045 +0.021;0.027;0.033;0.031;0.038;0.046 +0.019;0.026;0.027;0.03;0.039;0.041 +0.019;0.025;0.023;0.035;0.045;0.041 +0.022;0.028;0.025;0.048;0.06;0.053 +0.031;0.036;0.032;0.057;0.063;0.056 +0.053;0.054;0.051;0.073;0.071;0.067 +0.101;0.095;0.096;0.088;0.079;0.079 +0.126;0.114;0.122;0.095;0.081;0.087 +0.125;0.112;0.127;0.094;0.08;0.09 +0.12;0.109;0.124;0.09;0.078;0.088 +0.082;0.085;0.084;0.061;0.061;0.06 +0.028;0.041;0.028;0.021;0.029;0.02 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/pc.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/pc.csv new file mode 100644 index 0000000000..08393090ca --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/pc.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.006;0.005;0.008;0.006;0.005;0.008 +0.003;0.003;0.004;0.003;0.003;0.004 +0.002;0.001;0.002;0.002;0.001;0.002 +0.001;0.001;0.001;0.001;0.001;0.001 +0.001;0;0;0.001;0;0 +0.002;0.001;0.001;0.002;0.001;0.001 +0.01;0.004;0.003;0.01;0.004;0.003 +0.024;0.013;0.011;0.024;0.013;0.011 +0.043;0.038;0.034;0.043;0.038;0.034 +0.056;0.063;0.067;0.056;0.063;0.067 +0.052;0.065;0.088;0.052;0.065;0.088 +0.056;0.07;0.096;0.056;0.07;0.096 +0.042;0.054;0.066;0.042;0.054;0.066 +0.047;0.061;0.064;0.047;0.061;0.064 +0.05;0.065;0.053;0.05;0.065;0.053 +0.052;0.063;0.047;0.052;0.063;0.047 +0.062;0.065;0.051;0.062;0.065;0.051 +0.076;0.071;0.063;0.076;0.071;0.063 +0.076;0.064;0.065;0.076;0.064;0.065 +0.086;0.068;0.07;0.086;0.068;0.07 +0.094;0.074;0.075;0.094;0.074;0.075 +0.089;0.073;0.073;0.089;0.073;0.073 +0.053;0.053;0.044;0.053;0.053;0.044 +0.017;0.024;0.013;0.017;0.024;0.013 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/stove.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/stove.csv new file mode 100644 index 0000000000..8e857f6d9f --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/stove.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0;0;0;0;0;0 +0;0;0;0;0;0 +0.002;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0.022;0;0;0.007;0.003;0 +0.035;0.007;0.004;0.032;0.009;0.002 +0.026;0.03;0.029;0.035;0.021;0.011 +0.031;0.052;0.066;0.035;0.036;0.04 +0.031;0.067;0.088;0.039;0.053;0.086 +0.044;0.078;0.088;0.046;0.065;0.12 +0.096;0.137;0.195;0.123;0.131;0.211 +0.148;0.16;0.195;0.168;0.172;0.208 +0.096;0.093;0.074;0.105;0.128;0.086 +0.078;0.052;0.033;0.035;0.047;0.029 +0.074;0.045;0.029;0.028;0.045;0.023 +0.07;0.037;0.022;0.042;0.045;0.023 +0.052;0.045;0.022;0.063;0.047;0.034 +0.044;0.059;0.055;0.095;0.071;0.051 +0.052;0.059;0.048;0.07;0.059;0.046 +0.044;0.037;0.029;0.035;0.033;0.02 +0.026;0.022;0.015;0.021;0.018;0.006 +0.017;0.015;0.004;0.014;0.012;0.003 +0.013;0.005;0.001;0.007;0.006;0.001 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/telecommunication.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/telecommunication.csv new file mode 100644 index 0000000000..ffc09d6c7e --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/telecommunication.csv @@ -0,0 +1,25 @@ 
+summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 +0.042;0.042;0.042;0.042;0.042;0.042 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/television.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/television.csv new file mode 100644 index 0000000000..db74bb769b --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/television.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.011;0.01;0.02;0.011;0.01;0.02 +0.005;0.005;0.009;0.005;0.005;0.009 +0.002;0.002;0.004;0.002;0.002;0.004 +0.001;0.001;0.002;0.001;0.001;0.002 +0;0;0;0;0;0 +0.001;0;0;0.001;0;0 +0.002;0.001;0.001;0.002;0.001;0.001 +0.004;0.001;0.002;0.004;0.001;0.002 +0.005;0.003;0.002;0.005;0.003;0.002 +0.006;0.004;0.006;0.006;0.004;0.006 +0.006;0.006;0.01;0.006;0.006;0.01 +0.008;0.007;0.013;0.008;0.007;0.013 +0.009;0.008;0.017;0.009;0.008;0.017 +0.012;0.013;0.02;0.012;0.013;0.02 +0.016;0.02;0.029;0.016;0.02;0.029 +0.023;0.032;0.038;0.023;0.032;0.038 +0.034;0.04;0.048;0.034;0.04;0.048 +0.047;0.055;0.058;0.047;0.055;0.058 +0.083;0.089;0.079;0.083;0.089;0.079 +0.152;0.132;0.123;0.152;0.132;0.123 +0.221;0.193;0.2;0.221;0.193;0.2 +0.206;0.191;0.191;0.206;0.191;0.191 +0.113;0.131;0.099;0.113;0.131;0.099 +0.032;0.056;0.028;0.032;0.056;0.028 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/video_recorder.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/video_recorder.csv new file mode 100644 index 0000000000..2563abaa19 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/video_recorder.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.028;0.017;0.027;0.028;0.017;0.027 +0.013;0.008;0.013;0.013;0.008;0.013 +0.006;0.003;0.005;0.006;0.003;0.005 +0.002;0.001;0.002;0.002;0.001;0.002 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0.011;0;0;0.011 +0;0.006;0.014;0;0.006;0.014 +0.019;0.018;0.031;0.019;0.018;0.031 +0;0.024;0.039;0;0.024;0.039 +0.019;0.039;0.045;0.019;0.039;0.045 +0.019;0.069;0.085;0.019;0.069;0.085 +0.038;0.093;0.107;0.038;0.093;0.107 +0.057;0.105;0.121;0.057;0.105;0.121 +0.057;0.096;0.096;0.057;0.096;0.096 +0.057;0.075;0.068;0.057;0.075;0.068 +0.072;0.063;0.056;0.072;0.063;0.056 +0.153;0.12;0.076;0.153;0.12;0.076 +0.21;0.114;0.087;0.21;0.114;0.087 +0.167;0.084;0.076;0.167;0.084;0.076 +0.081;0.063;0.039;0.081;0.063;0.039 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/washing_machine.csv 
b/src/main/resources/load/markov/probabilities/switch_on_probabilities/washing_machine.csv new file mode 100644 index 0000000000..562c47b796 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/washing_machine.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0;0;0;0;0;0 +0.012;0.007;0.004;0.01;0.01;0 +0.058;0.015;0.018;0.036;0.025;0.007 +0.081;0.081;0.024;0.077;0.049;0.018 +0.092;0.111;0.059;0.128;0.088;0.059 +0.104;0.118;0.11;0.123;0.128;0.117 +0.087;0.103;0.123;0.092;0.128;0.117 +0.081;0.118;0.11;0.092;0.088;0.103 +0.058;0.089;0.066;0.072;0.079;0.088 +0.052;0.066;0.07;0.062;0.079;0.081 +0.058;0.052;0.064;0.056;0.074;0.066 +0.058;0.044;0.044;0.051;0.049;0.051 +0.069;0.044;0.044;0.041;0.044;0.059 +0.035;0.037;0.061;0.041;0.044;0.073 +0.069;0.037;0.066;0.046;0.044;0.066 +0.064;0.044;0.066;0.031;0.039;0.044 +0.023;0.022;0.044;0.029;0.02;0.022 +0;0.007;0.022;0.01;0.01;0.018 +0;0.003;0.007;0.002;0.004;0.012 diff --git a/src/main/resources/load/markov/probabilities/switch_on_probabilities/water_heating.csv b/src/main/resources/load/markov/probabilities/switch_on_probabilities/water_heating.csv new file mode 100644 index 0000000000..6ef7c72b07 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/switch_on_probabilities/water_heating.csv @@ -0,0 +1,25 @@ +summer_weekday;summer_saturday;summer_sunday;winter_weekday;winter_saturday;winter_sunday +0.008;0.007;0.013;0.006;0.008;0.015 +0.005;0.005;0.007;0.003;0.007;0.006 +0.008;0.003;0.004;0.001;0.002;0.004 +0.007;0.001;0.001;0.006;0.002;0.001 +0.012;0.003;0;0.011;0.002;0.001 +0.034;0.007;0.002;0.025;0.008;0.002 +0.095;0.021;0.013;0.062;0.014;0.006 +0.085;0.039;0.03;0.071;0.031;0.02 +0.082;0.067;0.081;0.073;0.071;0.074 +0.063;0.067;0.103;0.065;0.071;0.103 +0.049;0.06;0.103;0.056;0.057;0.118 +0.034;0.048;0.092;0.04;0.059;0.106 +0.032;0.054;0.074;0.045;0.057;0.088 +0.033;0.054;0.063;0.051;0.059;0.074 +0.034;0.048;0.048;0.045;0.057;0.047 +0.038;0.048;0.033;0.031;0.055;0.034 +0.04;0.054;0.033;0.034;0.057;0.027 +0.063;0.06;0.041;0.051;0.098;0.049 +0.05;0.067;0.044;0.079;0.086;0.052 +0.053;0.073;0.063;0.09;0.088;0.064 +0.046;0.06;0.044;0.062;0.039;0.029 +0.042;0.079;0.044;0.042;0.031;0.039 +0.053;0.048;0.041;0.034;0.026;0.025 +0.032;0.024;0.022;0.017;0.016;0.017 diff --git a/src/main/resources/load/markov/probabilities/usage_probabilities/usage_probabilities.csv b/src/main/resources/load/markov/probabilities/usage_probabilities/usage_probabilities.csv new file mode 100644 index 0000000000..1d82ce52e6 --- /dev/null +++ b/src/main/resources/load/markov/probabilities/usage_probabilities/usage_probabilities.csv @@ -0,0 +1,14 @@ +appliance_category,usage_probability +washing_machine,0.6 +lighting,5 +dish_washer,0.77 +video_recorder,0.15 +telecommunication,1 +pc,1 +fridge,1 +television,2.33 +freezer,1 +dryer,0.44 +stove,1 +water_heating,1 +other_load,1 \ No newline at end of file diff --git a/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgent.scala b/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgent.scala index 338547efbc..669901a16b 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgent.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgent.scala @@ -14,6 +14,7 @@ import edu.ie3.simona.agent.participant.load.LoadAgentFundamentals.{ ProfileLoadAgentFundamentals, RandomLoadAgentFundamentals, } +import 
edu.ie3.simona.agent.participant.load.markov.MarkovAgent import edu.ie3.simona.agent.participant.statedata.ParticipantStateData import edu.ie3.simona.agent.participant.statedata.ParticipantStateData.ParticipantInitializeStateData import edu.ie3.simona.config.SimonaConfig.LoadRuntimeConfig @@ -47,6 +48,8 @@ object LoadAgent { Props(new ProfileLoadAgent(scheduler, initStateData, listener)) case LoadModelBehaviour.RANDOM => Props(new RandomLoadAgent(scheduler, initStateData, listener)) + case LoadModelBehaviour.MARKOV => + Props(new MarkovAgent(scheduler, initStateData, listener)) case unsupported => throw new IllegalArgumentException( s"The load agent behaviour '$unsupported' is currently not supported." diff --git a/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgentFundamentals.scala b/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgentFundamentals.scala index 78519a54c1..537c40a5ef 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgentFundamentals.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/load/LoadAgentFundamentals.scala @@ -520,13 +520,14 @@ object LoadAgentFundamentals { operationInterval: OperationInterval, modelConfig: LoadRuntimeConfig, reference: LoadReference, - ): RandomLoadModel = + ): RandomLoadModel = { RandomLoadModel( inputModel, operationInterval, modelConfig.scaling, reference, ) + } override protected def createCalcRelevantData( baseStateData: ParticipantModelBaseStateData[ diff --git a/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgent.scala b/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgent.scala new file mode 100644 index 0000000000..5bffd87277 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgent.scala @@ -0,0 +1,195 @@ +/* + * © 2024. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.agent.participant.load.markov + +import edu.ie3.datamodel.models.input.system.LoadInput +import edu.ie3.datamodel.models.result.system.SystemParticipantResult +import edu.ie3.simona.agent.participant.ParticipantAgent +import edu.ie3.simona.agent.participant.data.Data.PrimaryData.ApparentPower +import edu.ie3.simona.agent.participant.statedata.ParticipantStateData.ParticipantInitializeStateData +import edu.ie3.simona.agent.participant.statedata.{BaseStateData, ParticipantStateData} +import edu.ie3.simona.agent.state.AgentState +import edu.ie3.simona.config.SimonaConfig.LoadRuntimeConfig +import edu.ie3.simona.model.participant.ModelState.ConstantState +import edu.ie3.simona.model.participant.load.markov.{MarkovModel, MarkovRelevantData} +import edu.ie3.simona.model.participant.{FlexChangeIndicator, ModelState} +import edu.ie3.util.scala.quantities.ReactivePower +import org.apache.pekko.actor.{ActorRef, FSM, Props} +import squants.{Dimensionless, Power} + +import java.time.ZonedDateTime +import java.util.UUID + +object MarkovAgent { + def props( + scheduler: ActorRef, + initStateData: ParticipantInitializeStateData[ + LoadInput, + LoadRuntimeConfig, + ApparentPower, + ], + listener: Iterable[ActorRef], + ): Props = + Props( + new MarkovAgent( + scheduler, + initStateData: ParticipantInitializeStateData[ + LoadInput, + LoadRuntimeConfig, + ApparentPower, + ], + listener, + ) + ) +} + +/** Creating a load agent + * + * @param scheduler + * Actor reference of the scheduler + * @param listener + * List of listeners interested in results + */ +class MarkovAgent( + scheduler: ActorRef, + initStateData: ParticipantInitializeStateData[ + LoadInput, + LoadRuntimeConfig, + ApparentPower, + ], + override val listener: Iterable[ActorRef], +) extends ParticipantAgent[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + ParticipantStateData[ApparentPower], + LoadInput, + LoadRuntimeConfig, + MarkovModel, + ](scheduler, initStateData) + with MarkovAgentFundamentals { + /* + * "Hey, SIMONA! What is handled in ParticipantAgent?" + * "Hey, dude! The following things are handled in ParticipantAgent: + * 1) Initialization of Agent + * 2) Event reactions in Idle state + * 3) Handling of incoming information + * 4) Performing model calculations + * " + */ + /** Partial function, that is able to transfer + * [[ParticipantModelBaseStateData]] (holding the actual calculation model) + * into a pair of active and reactive power + */ + override protected val calculateModelPowerFunc: (Long, BaseStateData.ParticipantModelBaseStateData[ApparentPower, MarkovRelevantData, ModelState.ConstantState.type, MarkovModel], ModelState.ConstantState.type, Dimensionless) => ApparentPower = ??? + + /** Abstractly calculate the power output of the participant utilising + * secondary data. However, it might appear, that not the complete set of + * secondary data is available for the given tick. This might especially be + * true, if the actor has been additionally activated. This method thereby + * has to try and fill up missing data with the last known data, as this is + * still supposed to be valid. The secondary data therefore is put to the + * calculation relevant data store.
The next state is [[Idle]], sending a + * [[edu.ie3.simona.ontology.messages.SchedulerMessage.Completion]] to the + * scheduler and using updated result values.
Actual implementation + * can be found in each participant's fundamentals. + * + * @param baseStateData + * The base state data with collected secondary data + * @param lastModelState + * The current model state, before applying changes by externally received + * data + * @param currentTick + * Tick, the trigger belongs to + * @param scheduler + * [[ActorRef]] to the scheduler in the simulation + * @return + * [[Idle]] with updated result values + */ + override def calculatePowerWithSecondaryDataAndGoToIdle(baseStateData: BaseStateData.ParticipantModelBaseStateData[ApparentPower, MarkovRelevantData, ModelState.ConstantState.type, MarkovModel], lastModelState: ModelState.ConstantState.type, currentTick: Long, scheduler: ActorRef): FSM.State[AgentState, ParticipantStateData[ApparentPower]] = ??? + + override protected def createInitialState(baseStateData: BaseStateData.ParticipantModelBaseStateData[ApparentPower, MarkovRelevantData, ModelState.ConstantState.type, MarkovModel]): ModelState.ConstantState.type = ??? + + override protected def createCalcRelevantData(baseStateData: BaseStateData.ParticipantModelBaseStateData[ApparentPower, MarkovRelevantData, ModelState.ConstantState.type, MarkovModel], tick: Long): MarkovRelevantData = ??? + + /** Handle an active power change by flex control. + * + * @param tick + * Tick, in which control is issued + * @param baseStateData + * Base state data of the agent + * @param data + * Calculation relevant data + * @param lastState + * Last known model state + * @param setPower + * Setpoint active power + * @return + * Updated model state, a result model and a [[FlexChangeIndicator]] + */ + override def handleControlledPowerChange(tick: Long, baseStateData: BaseStateData.ParticipantModelBaseStateData[ApparentPower, MarkovRelevantData, ModelState.ConstantState.type, MarkovModel], data: MarkovRelevantData, lastState: ModelState.ConstantState.type, setPower: Power): (ModelState.ConstantState.type, ApparentPower, FlexChangeIndicator) = ??? + + /** Abstract method to build the calculation model from input + * + * @param inputModel + * Input model description + * @param modelConfig + * Configuration for the model + * @param simulationStartDate + * Wall clock time of first instant in simulation + * @param simulationEndDate + * Wall clock time of last instant in simulation + * @return + */ + override def buildModel(inputModel: ParticipantStateData.InputModelContainer[LoadInput], modelConfig: LoadRuntimeConfig, simulationStartDate: ZonedDateTime, simulationEndDate: ZonedDateTime): MarkovModel = ??? + + /** Update the last known model state with the given external, relevant data + * + * @param tick + * Tick to update state for + * @param modelState + * Last known model state + * @param calcRelevantData + * Data, relevant for calculation + * @param nodalVoltage + * Current nodal voltage of the agent + * @param model + * Model for calculation + * @return + * The updated state at given tick under consideration of calculation + * relevant data + */ + override protected def updateState(tick: Long, modelState: ModelState.ConstantState.type, calcRelevantData: MarkovRelevantData, nodalVoltage: Dimensionless, model: MarkovModel): ModelState.ConstantState.type = ??? 
+ + /** Determine the average result within the given tick window + * + * @param tickToResults + * Mapping from data tick to actual data + * @param windowStart + * First, included tick of the time window + * @param windowEnd + * Last, included tick of the time window + * @param activeToReactivePowerFuncOpt + * An Option on a function, that transfers the active into reactive power + * @return + * The averaged result + */ + override def averageResults(tickToResults: Map[Long, ApparentPower], windowStart: Long, windowEnd: Long, activeToReactivePowerFuncOpt: Option[Power => ReactivePower]): ApparentPower = ??? + + /** Determines the correct result. + * + * @param uuid + * Unique identifier of the physical model + * @param dateTime + * Real world date of the result + * @param result + * The primary data to build a result model for + * @return + * The equivalent event + */ + override protected def buildResult(uuid: UUID, dateTime: ZonedDateTime, result: ApparentPower): SystemParticipantResult = ??? +} diff --git a/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgentFundamentals.scala b/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgentFundamentals.scala new file mode 100644 index 0000000000..ed6ebdcdc5 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/agent/participant/load/markov/MarkovAgentFundamentals.scala @@ -0,0 +1,383 @@ +/* + * © 2024. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + + package edu.ie3.simona.agent.participant.load.markov + + import edu.ie3.datamodel.models.input.system.LoadInput + import edu.ie3.datamodel.models.result.system.{LoadResult, SystemParticipantResult} + import edu.ie3.simona.agent.participant.ParticipantAgent.getAndCheckNodalVoltage + import edu.ie3.simona.agent.participant.ParticipantAgentFundamentals + import edu.ie3.simona.agent.participant.data.Data.PrimaryData.{ApparentPower, ZERO_POWER} + import edu.ie3.simona.agent.participant.data.Data.SecondaryData + import edu.ie3.simona.agent.participant.data.secondary.SecondaryDataService + import edu.ie3.simona.agent.participant.load.LoadAgent + import edu.ie3.simona.agent.participant.statedata.BaseStateData.{FlexControlledData, ParticipantModelBaseStateData} + import edu.ie3.simona.agent.participant.statedata.ParticipantStateData + import edu.ie3.simona.agent.participant.statedata.ParticipantStateData.InputModelContainer + import edu.ie3.simona.agent.state.AgentState + import edu.ie3.simona.agent.state.AgentState.Idle + import edu.ie3.simona.config.SimonaConfig.LoadRuntimeConfig + import edu.ie3.simona.event.notifier.NotifierConfig + import edu.ie3.simona.exceptions.agent.InconsistentStateException + import edu.ie3.simona.model.participant.ModelState.ConstantState + import edu.ie3.simona.model.participant.load.LoadReference + import edu.ie3.simona.model.participant.load.markov.MarkovModel.MarkovRelevantData + import edu.ie3.simona.model.participant.load.markov.{MarkovModel, MarkovRelevantData} + import edu.ie3.simona.model.participant.{FlexChangeIndicator, ModelState} + import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.FlexResponse + import edu.ie3.simona.util.TickUtil.TickLong + import edu.ie3.util.quantities.QuantityUtils.RichQuantityDouble + import edu.ie3.util.scala.OperationInterval + import edu.ie3.util.scala.quantities.ReactivePower + import org.apache.pekko.actor.typed.{ActorRef => TypedActorRef} + import org.apache.pekko.actor.{ActorRef, 
FSM} + import squants.{Dimensionless, Power} + + import java.time.ZonedDateTime + import java.util.UUID + import scala.reflect.{ClassTag, classTag} + + protected trait MarkovAgentFundamentals + extends ParticipantAgentFundamentals[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + ParticipantStateData[ApparentPower], + LoadInput, + LoadRuntimeConfig, + MarkovModel, + ] { + this: MarkovAgent => + override protected val pdClassTag: ClassTag[ApparentPower] = + classTag[ApparentPower] + override val alternativeResult: ApparentPower = ZERO_POWER + + /** Determines the needed base state data in dependence of the foreseen + * simulation mode of the agent. + * + * @param inputModel + * Input model definition + * @param modelConfig + * Configuration of the model + * @param services + * Collection of services to register with + * @param simulationStartDate + * Real world time date time, when the simulation starts + * @param simulationEndDate + * Real world time date time, when the simulation ends + * @param resolution + * Agents regular time bin it wants to be triggered e.g one hour + * @param requestVoltageDeviationThreshold + * Threshold, after which two nodal voltage magnitudes from participant + * power requests for the same tick are considered to be different + * @param outputConfig + * Config of the output behaviour for simulation results + * @return + * A child of [[ParticipantModelBaseStateData]] that reflects the behaviour + * based on the data source definition + */ + + override def determineModelBaseStateData( + inputModel: InputModelContainer[LoadInput], + modelConfig: LoadRuntimeConfig, + services: Iterable[SecondaryDataService[_ <: SecondaryData]], + simulationStartDate: ZonedDateTime, + simulationEndDate: ZonedDateTime, + resolution: Long, + requestVoltageDeviationThreshold: Double, + outputConfig: NotifierConfig, + maybeEmAgent: Option[TypedActorRef[FlexResponse]], + ): ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData, + ModelState.ConstantState.type, + MarkovModel + + + ] = { + /* Build the calculation model */ + val model = + buildModel( + inputModel, + modelConfig, + simulationStartDate, + simulationEndDate, + ) + + def createInitialState( + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + MarkovModel, + ] + ): ModelState.ConstantState.type = + ConstantState + + def createCalcRelevantData( + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + MarkovModel, + ], + tick: Long, + ): MarkovRelevantData = MarkovRelevantData(currentTick.toDateTime(baseStateData.startDate)) + + /** Handle an active power change by flex control. 
+ * @param tick + * Tick, in which control is issued + * @param baseStateData + * Base state data of the agent + * @param data + * Calculation relevant data + * @param lastState + * Last known model state + * @param setPower + * Setpoint active power + * @return + * Updated model state, a result model and a [[FlexChangeIndicator]] + */ + def handleControlledPowerChange( + tick: Long, + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + MarkovModel, + ], + data: MarkovRelevantData, + lastState: ConstantState.type, + setPower: squants.Power, + ): (ConstantState.type, ApparentPower, FlexChangeIndicator) = { + /* Calculate result */ + val voltage = getAndCheckNodalVoltage(baseStateData, tick) + + val reactivePower = baseStateData.model.calculateReactivePower( + setPower, + voltage, + ) + val result = ApparentPower(setPower, reactivePower) + + /* Handle the request within the model */ + val (updatedState, flexChangeIndicator) = + baseStateData.model.handleControlledPowerChange(data, lastState, setPower) + (updatedState, result, flexChangeIndicator) + } + + /** Partial function, that is able to transfer + * [[ParticipantModelBaseStateData]] (holding the actual calculation model) + * into a pair of active and reactive power + */ + val calculateModelPowerFunc: ( + Long, + ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData.type, + ConstantState.type, + MarkovModel, + ], + ConstantState.type, + Dimensionless, + ) => ApparentPower = ( + tick: Long, + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData.type, + ConstantState.type, + MarkovModel, + ], + state: ConstantState.type, + voltage: Dimensionless, + ) => + baseStateData.model.calculatePower( + tick, + voltage, + state, + MarkovRelevantData, + ) + + /** Calculate the power output of the participant utilising secondary data. + * However, it might appear, that not the complete set of secondary data is + * available for the given tick. This might especially be true, if the actor + * has been additionally activated. This method thereby has to try and fill + * up missing data with the last known data, as this is still supposed to be + * valid. The secondary data therefore is put to the calculation relevant + * data store. The next state is [[Idle]], sending a + * [[edu.ie3.simona.ontology.messages.SchedulerMessage.Completion]] to the + * scheduler and using updated result values.
+ * + * @param baseStateData + * The base state data with collected secondary data + * @param lastModelState + * Optional last model state + * @param currentTick + * Tick, the trigger belongs to + * @param scheduler + * [[ActorRef]] to the scheduler in the simulation + * @return + * [[Idle]] with updated result values + */ + def calculatePowerWithSecondaryDataAndGoToIdle( + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData, + ConstantState.type, + MarkovModel, + ], + lastModelState: ConstantState.type, + currentTick: Long, + scheduler: ActorRef, + ): FSM.State[AgentState, ParticipantStateData[ApparentPower]] = + throw new InconsistentStateException( + s"Markov model is not able to calculate power with secondary data." + ) + + /** Determine the average result within the given tick window + * + * @param tickToResults + * Mapping from data tick to actual data + * @param windowStart + * First, included tick of the time window + * @param windowEnd + * Last, included tick of the time window + * @param activeToReactivePowerFuncOpt + * An Option on a function, that transfers the active into reactive power + * @return + * The averaged result + */ + def averageResults( + tickToResults: Map[Long, ApparentPower], + windowStart: Long, + windowEnd: Long, + activeToReactivePowerFuncOpt: Option[ + Power => ReactivePower + ] = None, + ): ApparentPower = + ParticipantAgentFundamentals.averageApparentPower( + tickToResults, + windowStart, + windowEnd, + activeToReactivePowerFuncOpt, + log, + ) + + /** Determines the correct result. + * + * @param uuid + * Unique identifier of the physical model + * @param dateTime + * Real world date of the result + * @param result + * The primary data to build a result model for + * @return + * The equivalent event + */ + def buildResult( + uuid: UUID, + dateTime: ZonedDateTime, + result: ApparentPower, + ): SystemParticipantResult = + new LoadResult( + dateTime, + uuid, + result.p.toMegawatts.asMegaWatt, + result.q.toMegavars.asMegaVar, + ) + + /** Update the last known model state with the given external, relevant data + * + * @param tick + * Tick to update state for + * @param modelState + * Last known model state + * @param calcRelevantData + * Data, relevant for calculation + * @param nodalVoltage + * Current nodal voltage of the agent + * @param model + * Model for calculation + * @return + * The updated state at given tick under consideration of calculation + * relevant data + */ + def updateState( + tick: Long, + modelState: ModelState.ConstantState.type, + calcRelevantData: MarkovRelevantData, + nodalVoltage: squants.Dimensionless, + model: MarkovModel, + ): ModelState.ConstantState.type = modelState + } + } + + + object MarkovAgentFundamentals { + def buildModel( + inputModel: LoadInput, + operationInterval: OperationInterval, + modelConfig: LoadRuntimeConfig, + reference: LoadReference, + ): MarkovModel = + MarkovModel( + inputModel, + modelConfig.scaling, + operationInterval, + reference, + ) + + protected def createCalcRelevantData( + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData.type, + ConstantState.type, + MarkovModel, + ], + tick: Long, + ): MarkovRelevantData.type = + MarkovRelevantData + + /** Partial function, that is able to transfer + * [[ParticipantModelBaseStateData]] (holding the actual calculation model) + * into a pair of active and reactive power + */ + val calculateModelPowerFunc: ( + Long, + ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData.type, + 
ConstantState.type, + MarkovModel, + ], + ConstantState.type, + Dimensionless, + ) => ApparentPower = ( + tick: Long, + baseStateData: ParticipantModelBaseStateData[ + ApparentPower, + MarkovRelevantData.type, + ConstantState.type, + MarkovModel, + ], + state: ConstantState.type, + voltage: Dimensionless, + ) => + baseStateData.model.calculatePower( + tick, + voltage, + state, + MarkovRelevantData, + ) + + + + + } diff --git a/src/main/scala/edu/ie3/simona/model/participant/load/LoadModelBehaviour.scala b/src/main/scala/edu/ie3/simona/model/participant/load/LoadModelBehaviour.scala index 0a9a057808..851a1d76c1 100644 --- a/src/main/scala/edu/ie3/simona/model/participant/load/LoadModelBehaviour.scala +++ b/src/main/scala/edu/ie3/simona/model/participant/load/LoadModelBehaviour.scala @@ -14,4 +14,5 @@ case object LoadModelBehaviour extends ParsableEnumeration { val FIX: Value = Value("fix") val PROFILE: Value = Value("profile") val RANDOM: Value = Value("random") + val MARKOV: Value = Value("markov") } diff --git a/src/main/scala/edu/ie3/simona/model/participant/load/markov/ApplianceCategory.scala b/src/main/scala/edu/ie3/simona/model/participant/load/markov/ApplianceCategory.scala new file mode 100644 index 0000000000..8d1d9dc53c --- /dev/null +++ b/src/main/scala/edu/ie3/simona/model/participant/load/markov/ApplianceCategory.scala @@ -0,0 +1,31 @@ +/* + * © 2024. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.model.participant.load.markov + +import edu.ie3.simona.util.ParsableEnumeration + +final case class ApplianceCategory() + +/** Enumeration of all considered appliance types + */ +case object ApplianceCategory extends ParsableEnumeration { + // val K: Value = Value("k") + + val DISH_WASHER: Value = Value("dish_washer") + val WASHING_MACHINE: Value = Value("washing_machine") + val DRYER: Value = Value("dryer") + val STOVE: Value = Value("stove") + val FRIDGE: Value = Value("fridge") + val FREEZER: Value = Value("freezer") + val TELEVISION: Value = Value("television") + val VIDEO_RECORDER: Value = Value("video_recorder") + val PC: Value = Value("pc") + val TELECOMMUNICATION: Value = Value("telecommunication") + val LIGHTING: Value = Value("lighting") + val WATER_HEATING: Value = Value("water_heating") + val OTHER_LOAD: Value = Value("other_load") +} diff --git a/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovModel.scala b/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovModel.scala new file mode 100644 index 0000000000..e2f91bae01 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovModel.scala @@ -0,0 +1,160 @@ +/* + * © 2024. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.model.participant.load.markov + +import edu.ie3.datamodel.models.input.system.LoadInput +import edu.ie3.simona.agent.participant.data.Data.PrimaryData.ApparentPower +import edu.ie3.simona.model.participant.CalcRelevantData.LoadRelevantData +import edu.ie3.simona.model.participant.ModelState.ConstantState +import edu.ie3.simona.model.participant.control.QControl +import edu.ie3.simona.model.participant.load.LoadReference +import edu.ie3.simona.model.participant.load.markov.MarkovParamStore._ +import edu.ie3.simona.model.participant.{CalcRelevantData, FlexChangeIndicator, ModelState, SystemParticipant} +import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage +import edu.ie3.util.quantities.PowerSystemUnits +import edu.ie3.util.scala.OperationInterval +import squants.Dimensionless +import squants.energy.{Kilowatts, Power} + +import java.util.UUID + +final case class MarkovModel( + uuid: UUID, + id: String, + operationInterval: OperationInterval, + qControl: QControl, + sRated: Power, + cosPhiRated: Double, +) extends SystemParticipant[ + MarkovRelevantData, + ApparentPower, + ConstantState.type, + ]( + uuid = uuid, + id = id, + operationInterval = operationInterval, + qControl = qControl, + sRated = sRated, + cosPhiRated = cosPhiRated, + ) { + + /** Calculate the power behaviour based on the given data. + * + * @param tick + * Regarded instant in simulation + * @param voltage + * Nodal voltage magnitude + * @param modelState + * Current state of the model + * @param data + * Further needed, secondary data + * @return + * A tuple of active and reactive power + */ + override def calculatePower( + tick: Long, + voltage: Dimensionless, + modelState: ModelState.ConstantState.type, + data: MarkovRelevantData, + ): ApparentPower = { + val activePower = calculateActivePower(modelState, data) + val reactivePower = calculateReactivePower(activePower, voltage) + + ApparentPower(activePower, reactivePower) + } + + /** Calculate the active power behaviour of the model + * + * @param modelState + * Current state of the model + * @param data + * Further needed, secondary data + * @return + * Active power + */ + override protected def calculateActivePower( + modelState: ModelState.ConstantState.type, + data: MarkovRelevantData, + ): Power = { + + // TODO: derive the active power from the appliance and probability maps held in MarkovRelevantData + + Kilowatts(1) // constant placeholder until the Markov calculation is implemented + } + + /** @param data + * The relevant data for calculation + * @param lastState + * The last reached state + * @return + * flex options + */ + override def determineFlexOptions( + data: MarkovRelevantData, + lastState: ModelState.ConstantState.type, + ): FlexibilityMessage.ProvideFlexOptions = ??? + + /** @param data + * The relevant data for calculation + * @param lastState + * The last reached state + * @param setPower + * power that has been set by ??? + * @return + * updated relevant data and an indication at which circumstances flex + * options will change next + */ + override def handleControlledPowerChange( + data: MarkovRelevantData, + lastState: ModelState.ConstantState.type, + setPower: Power, + ): (ModelState.ConstantState.type, FlexChangeIndicator) = ???
+} + +class MarkovRelevantData extends CalcRelevantData { + + val Usage_Probabilities_Map = Usage_Probabilities() + val sop_Dish_Washer_Map = sop_Dish_Washer() + + val average_Hh_Map = Average_HH() + val by_Income_Map = income() + val by_Inhabitants_Map = inhabitants() + val by_Type_Map = Type() + val load_Ts_Map = load_TS() +} + + +object MarkovModel { + case object MarkovRelevantData extends LoadRelevantData + + def apply( + input: LoadInput, + scalingFactor: Double, + operationInterval: OperationInterval, + reference: LoadReference, + ): MarkovModel = { + + val scaledInput = input.copy().scale(scalingFactor).build() + + val model = MarkovModel( + scaledInput.getUuid, + scaledInput.getId, + operationInterval, + QControl(scaledInput.getqCharacteristics()), + Kilowatts( + scaledInput.getsRated + .to(PowerSystemUnits.KILOWATT) + .getValue + .doubleValue + ), + scaledInput.getCosPhiRated + ) + model.enable() + model + } + +} diff --git a/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStore.scala b/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStore.scala new file mode 100644 index 0000000000..0b550dccf2 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStore.scala @@ -0,0 +1,239 @@ +/* + * © 2020. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.model.participant.load.markov + +import java.io.{InputStreamReader, Reader} +import com.typesafe.scalalogging.LazyLogging +import org.apache.commons.csv.{CSVFormat, CSVParser} + +import scala.collection.mutable +import scala.collection.mutable.{Map => MutableMap} +import scala.jdk.CollectionConverters._ + +/** Storage for a collection of MarkovAgent parameters. + */ + +final case class MarkovParamStore() {} + +/** MarkovPramStore reads values from CSV files and returns them as Maps, where + * the keys represent different parameters and the values are the corresponding + * values. + */ + +object MarkovParamStore extends LazyLogging { + + def main(args: Array[String]): Unit = {} + + /** This function reads the usage probabilities from a CSV file and returns + * them as a Map, where the keys are the appliance categories and the values + * are the corresponding probabilities. + */ + + // Usage Probabilities + + def Usage_Probabilities(): Map[String, Double] = { + val reader = getDefaultReader + val customFormat = CSVFormat.DEFAULT.builder().setHeader().build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.iterator().asScala.drop(1) + val probabilitiesMap = records.map { record => + val applianceCategory = record.get("appliance_category") + val usageProbability = record.get("usage_probability").toDouble + (applianceCategory, usageProbability) + }.toMap + reader.close() + probabilitiesMap + } + + /** @return + * A reader pointing to the default Usage_Probabilities parameter location + */ + + private def getDefaultReader: Reader = { + logger.info( + "Markov Usage_Probabilities parameters file 'usage_probability.csv' from jar." 
) + new InputStreamReader( + getClass.getResourceAsStream( + "/load/markov/probabilities/usage_probabilities/usage_probabilities.csv" + ) + ) + } + + // Switch On Probabilities + + def sop_Dish_Washer(): mutable.Map[String, Seq[Double]] = { + val reader = getDefaultReadersop_Dish_Washer + // dish_washer.csv is semicolon separated and holds decimal probabilities + val customFormat = CSVFormat.DEFAULT.builder().setHeader().setDelimiter(';').build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.getRecords.asScala.toSeq + val header = csvParser.getHeaderNames.asScala.toSeq + val dish_Washer = mutable.Map[String, Seq[Double]]() + for (record <- records) { + for (i <- header.indices) { + val applianceCategory = header(i) + val value = record.get(i).toDouble + val existingValues = dish_Washer.getOrElse(applianceCategory, Seq()) + dish_Washer.put(applianceCategory, existingValues :+ value) + } + } + reader.close() + dish_Washer + } + + def getDefaultReadersop_Dish_Washer: Reader = { + logger.info("Markov switch-on probabilities file 'dish_washer.csv' from jar.") + new InputStreamReader( + getClass.getResourceAsStream( + "/load/markov/probabilities/switch_on_probabilities/dish_washer.csv" + ) + ) + } + + // Average HH + + def Average_HH(): Map[String, Double] = { + val reader = getDefaultReaderForAverageHH + val csvParser = CSVFormat.DEFAULT.parse(reader) + val records = csvParser.getRecords.asScala + val averageHHMap = records.headOption match { + case Some(headerRecord) => + val applianceNames = headerRecord.iterator().asScala.toSeq + val valuesRecord = + records.drop(1).headOption.getOrElse(csvParser.iterator().next()) + val averageHHValues = valuesRecord.iterator().asScala.map(_.toDouble) + applianceNames.zip(averageHHValues).toMap + case None => + Map.empty[String, Double] + } + + reader.close() + averageHHMap + } + + private def getDefaultReaderForAverageHH: Reader = { + logger.info("Markov Average_HH parameters file 'average_hh.csv' from jar.") + new InputStreamReader( + getClass.getResourceAsStream("/load/markov/appliances/average_hh.csv") + ) + } + + // By_Type + + def Type(): MutableMap[String, Map[String, Double]] = { + val reader = getDefaultReaderType + val customFormat = CSVFormat.DEFAULT.builder().setHeader().build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.getRecords.asScala.toSeq + val typeMap = MutableMap[String, Map[String, Double]]() + records.foreach { record => + val typeCategory = record.get(0) + val appliancesMap = MutableMap[String, Double]() + val header = csvParser.getHeaderNames.asScala.drop(1) + header.zipWithIndex.foreach { case (appliance, index) => + val value = record.get(index + 1).toDouble + appliancesMap += (appliance -> value) + } + typeMap += (typeCategory -> appliancesMap.toMap) + } + reader.close() + typeMap + } + + private def getDefaultReaderType: Reader = { + logger.info("Markov by_Type parameters file 'by_Type.csv' from jar.") + new InputStreamReader( + getClass.getResourceAsStream("/load/markov/appliances/by_Type.csv") + ) + } + + // By Income + + def income(): MutableMap[String, Map[String, Double]] = { + val reader = getDefaultReaderIncome + val customFormat = CSVFormat.DEFAULT.builder().setHeader().build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.getRecords.asScala.toSeq + val incomeMap = MutableMap[String, Map[String, Double]]() + records.foreach { record => + val incomeCategory = record.get(0) + val appliancesMap = MutableMap[String, Double]() + val header = csvParser.getHeaderNames.asScala.drop(1) + header.zipWithIndex.foreach { case
(appliance, index) => + val value = record.get(index + 1).toDouble + appliancesMap += (appliance -> value) + } + incomeMap += (incomeCategory -> appliancesMap.toMap) + } + reader.close() + incomeMap + } + + private def getDefaultReaderIncome: Reader = { + logger.info("Markov Income parameters file 'by_income.csv' from jar.") + new InputStreamReader( + getClass.getResourceAsStream("/load/markov/appliances/by_income.csv") + ) + } + + // By Inhabitants + + def inhabitants(): MutableMap[String, Map[String, Double]] = { + val reader = getDefaultReaderInhabitants + val customFormat = CSVFormat.DEFAULT.builder().setHeader().build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.getRecords.asScala.toSeq + val inhabitantsMap = MutableMap[String, Map[String, Double]]() + records.foreach { record => + val inhabitantCategory = record.get(0) + val appliancesMap = MutableMap[String, Double]() + val header = csvParser.getHeaderNames.asScala.drop(1) + header.zipWithIndex.foreach { case (appliance, index) => + val value = record.get(index + 1).toDouble + appliancesMap += (appliance -> value) + } + inhabitantsMap += (inhabitantCategory -> appliancesMap.toMap) + } + reader.close() + inhabitantsMap + } + + private def getDefaultReaderInhabitants: Reader = { + println("Reading by_inhabitants.csv file.") + new InputStreamReader( + getClass.getResourceAsStream("/load/markov/appliances/by_inhabitants.csv") + ) + } + + // Load_TS + + def load_TS(): mutable.Map[String, Seq[Int]] = { + val reader = getDefaultReaderLoadTS + val customFormat = CSVFormat.DEFAULT.builder().setHeader().build() + val csvParser = new CSVParser(reader, customFormat) + val records = csvParser.getRecords.asScala.toSeq + val header = csvParser.getHeaderNames.asScala.toSeq + val loadTS = mutable.Map[String, Seq[Int]]() + for (record <- records) { + for (i <- header.indices) { + val applianceCategory = header(i) + val value = record.get(i).toInt + val existingValues = loadTS.getOrElse(applianceCategory, Seq()) + loadTS.put(applianceCategory, existingValues :+ value) + } + } + reader.close() + loadTS + } + + def getDefaultReaderLoadTS: Reader = { + logger.info("Markov Income parameters file 'load_ts.csv' from jar.") + new InputStreamReader( + getClass.getResourceAsStream("/load/markov/appliances/load_ts.csv") + ) + } +} diff --git a/src/main/scala/edu/ie3/simona/model/participant/load/markov/SwitchOnProbabilityKey.scala b/src/main/scala/edu/ie3/simona/model/participant/load/markov/SwitchOnProbabilityKey.scala new file mode 100644 index 0000000000..735fc817ce --- /dev/null +++ b/src/main/scala/edu/ie3/simona/model/participant/load/markov/SwitchOnProbabilityKey.scala @@ -0,0 +1,67 @@ +/* + * © 2020. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.model.participant.load.markov + +import java.time.LocalDateTime + +object SwitchOnProbabilityKey { + sealed trait Season + object Season { + case object Spring extends Season + case object Summer extends Season + case object Autumn extends Season + case object Winter extends Season + } + + sealed trait DayType + object DayType { + case object Weekday extends DayType + case object Weekend extends DayType + } + + case class SwitchOnProbabilityKey( + season: Season, + dayType: DayType, + quarterlyHourOfDay: Int, + ) + + def extractFromDateTime(dateTime: LocalDateTime): SwitchOnProbabilityKey = { + val season = getSeason(dateTime) + val dayType = getDayType(dateTime) + val quarterlyHourOfDay = getQuarterlyHourOfDay(dateTime) + SwitchOnProbabilityKey(season, dayType, quarterlyHourOfDay) + } + + private def getSeason(dateTime: LocalDateTime): Season = { + val month = dateTime.getMonthValue + if (month >= 3 && month <= 5) Season.Spring + else if (month >= 6 && month <= 8) Season.Summer + else if (month >= 9 && month <= 11) Season.Autumn + else Season.Winter + } + + private def getDayType(dateTime: LocalDateTime): DayType = { + val dayOfWeek = dateTime.getDayOfWeek.getValue + if (dayOfWeek >= 1 && dayOfWeek <= 5) DayType.Weekday + else DayType.Weekend + } + + private def getQuarterlyHourOfDay(dateTime: LocalDateTime): Int = { + val hour = dateTime.getHour + val minute = dateTime.getMinute + val quarter = minute / 15 + hour * 4 + quarter + } + + def getAll: Seq[SwitchOnProbabilityKey] = { + for { + season <- Seq(Season.Spring, Season.Summer, Season.Autumn, Season.Winter) + dayType <- Seq(DayType.Weekday, DayType.Weekend) + quarterlyHourOfDay <- 0 until (4 * 24) + } yield SwitchOnProbabilityKey(season, dayType, quarterlyHourOfDay) + } +} diff --git a/src/test/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStoreSpec.scala b/src/test/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStoreSpec.scala new file mode 100644 index 0000000000..caf74b066e --- /dev/null +++ b/src/test/scala/edu/ie3/simona/model/participant/load/markov/MarkovParamStoreSpec.scala @@ -0,0 +1,58 @@ +/* + * © 2024.
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.model.participant.load.markov + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class MarkovParamStoreSpec extends AnyFlatSpec with Matchers { + + "dish_washer" should "return a map of season/day columns and their corresponding switch-on probabilities" in { + val sopMap = MarkovParamStore.sop_Dish_Washer() + sopMap shouldBe a[scala.collection.mutable.Map[_, _]] + sopMap.size shouldEqual 6 + } + + "usage_probabilities" should "return a map of appliance categories and their corresponding probabilities" in { + val probabilitiesMap = MarkovParamStore.Usage_Probabilities() + probabilitiesMap shouldBe a[Map[_, _]] + probabilitiesMap.size shouldEqual 12 + probabilitiesMap.getOrElse("other_load", 0.0) shouldEqual 1 + } + + "average_hh" should "return a map of appliances and their corresponding average values" in { + val average_HHMap = MarkovParamStore.Average_HH() + average_HHMap shouldBe a[Map[_, _]] + average_HHMap.size shouldEqual 13 + average_HHMap.getOrElse("lighting", 0.0) shouldEqual 2.5 + } + + "by_type" should "return a map of appliances in a house or flat and their corresponding values" in { + val TypeMap = MarkovParamStore.Type() + TypeMap shouldBe a[scala.collection.mutable.Map[_, _]] + TypeMap.size shouldEqual 2 + } + + "by_income" should "return a map of appliances per income class and their corresponding values" in { + val incomeMap = MarkovParamStore.income() + incomeMap shouldBe a[scala.collection.mutable.Map[_, _]] + incomeMap.size shouldEqual 8 + } + + "by_inhabitants" should "return a map of appliances per number of inhabitants and their corresponding values" in { + val inhabitantsMap = MarkovParamStore.inhabitants() + inhabitantsMap shouldBe a[scala.collection.mutable.Map[_, _]] + inhabitantsMap.size shouldEqual 5 + } + + "load_ts" should "return a map of appliances and their corresponding load time series" in { + val load_TSMap = MarkovParamStore.load_TS() + load_TSMap shouldBe a[scala.collection.mutable.Map[_, _]] + load_TSMap.size shouldEqual 13 + } + +}
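
Usage note (illustration only, not part of the patch): the sketch below shows how the parameter maps parsed by MarkovParamStore and the SwitchOnProbabilityKey helper introduced above could be consumed together. It only calls functions added in this changeset and assumes the /load/markov resources are available on the classpath; the object name MarkovParamDemo is a hypothetical driver, not something the PR adds.

import java.time.LocalDateTime

import edu.ie3.simona.model.participant.load.markov.{MarkovParamStore, SwitchOnProbabilityKey}

object MarkovParamDemo {
  def main(args: Array[String]): Unit = {
    // Appliance usage probabilities, keyed by appliance category (e.g. "dish_washer" -> 0.77)
    val usage = MarkovParamStore.Usage_Probabilities()
    // Appliance counts of an average household (e.g. "lighting" -> 2.5)
    val averageHh = MarkovParamStore.Average_HH()
    // Season, day type and quarter hour derived from a wall clock time
    val key = SwitchOnProbabilityKey.extractFromDateTime(LocalDateTime.of(2024, 7, 1, 18, 30))

    val dishWasherUsage = usage.getOrElse("dish_washer", 0.0)
    val lightingCount = averageHh.getOrElse("lighting", 0.0)
    println(s"dish_washer usage probability: $dishWasherUsage")
    println(s"average number of lighting appliances: $lightingCount")
    println(s"switch-on probability key: $key")
  }
}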