From 09fc27f4ffe35fa7b3a0c3196a0e0aa6c6a533aa Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 22:03:35 +0200 Subject: [PATCH 01/18] Filter FuturWarning before solving Those warnings should not be turned into errors --- src/multi_vector_simulator/D0_modelling_and_optimization.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/multi_vector_simulator/D0_modelling_and_optimization.py b/src/multi_vector_simulator/D0_modelling_and_optimization.py index 332cc2d15..e4da51b6e 100644 --- a/src/multi_vector_simulator/D0_modelling_and_optimization.py +++ b/src/multi_vector_simulator/D0_modelling_and_optimization.py @@ -348,6 +348,7 @@ def simulating(dict_values, model, local_energy_system): logging.info("Starting simulation.") # turn warnings into errors warnings.filterwarnings("error") + warnings.filterwarnings("always", category=FutureWarning) try: local_energy_system.solve( solver="cbc", From 63add0603e4d5b9acc63d2d3543ac45a1ec8607b Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 22:08:47 +0200 Subject: [PATCH 02/18] Enable min and max load for Transformer The variables status and status_nominal are ignored from the results, as they are only present for non_convex investment cases --- .../D1_model_components.py | 52 ++++++++++++++++++- src/multi_vector_simulator/server.py | 4 +- 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/src/multi_vector_simulator/D1_model_components.py b/src/multi_vector_simulator/D1_model_components.py index 326f21074..11b6c6de7 100644 --- a/src/multi_vector_simulator/D1_model_components.py +++ b/src/multi_vector_simulator/D1_model_components.py @@ -641,6 +641,23 @@ def transformer_constant_efficiency_fix(model, dict_asset, **kwargs): else: # single input and single output + min_load_opts = {"min": 0, "max": 1} + min_load = dict_asset.get(SOC_MIN, None) + if min_load is not None: + if min_load[VALUE] != 0: + logging.warning( + f"Minimal load of {min_load[VALUE]} was set to asset {dict_asset[LABEL]}" + ) + min_load_opts["min"] = min_load[VALUE] + max_load = dict_asset.get(SOC_MAX, None) + if max_load is not None: + if max_load[VALUE] != 1: + logging.warning( + f"Maximal load of {max_load[VALUE]} was set to asset {dict_asset[LABEL]}" + ) + + min_load_opts["max"] = max_load[VALUE] + check_list_parameters_transformers_single_input_single_output( dict_asset, model.timeindex.size ) @@ -650,6 +667,7 @@ def transformer_constant_efficiency_fix(model, dict_asset, **kwargs): kwargs[OEMOF_BUSSES][dict_asset[OUTFLOW_DIRECTION]]: solph.Flow( nominal_value=dict_asset[INSTALLED_CAP][VALUE], variable_costs=dict_asset[DISPATCH_PRICE][VALUE], + **min_load_opts, ) } efficiencies = { @@ -691,10 +709,14 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs): missing_dispatch_prices_or_efficiencies = None investment_bus = dict_asset.get(INVESTMENT_BUS) + invest_opts = {} + if dict_asset[MAXIMUM_ADD_CAP][VALUE] is not None: + invest_opts["maximum"] = dict_asset[MAXIMUM_ADD_CAP][VALUE] + investment = solph.Investment( ep_costs=dict_asset[SIMULATION_ANNUITY][VALUE], - maximum=dict_asset[MAXIMUM_ADD_CAP][VALUE], existing=dict_asset[INSTALLED_CAP][VALUE], + **invest_opts, ) # check if the transformer has multiple input or multiple output busses @@ -801,6 +823,32 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs): # single input and single output + min_load_opts = {"min": 0, "max": 1} + min_load = dict_asset.get(SOC_MIN, None) + if min_load is not None: + if min_load[VALUE] != 0: 
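+            # a minimal load different from zero makes this flow non-convex
+            # (solph.NonConvex is set a few lines below), which also requires a
+            # finite maximum added capacity (checked further down)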
+ logging.warning( + f"Minimal load of {min_load[VALUE]} was set to asset {dict_asset[LABEL]}" + ) + min_load_opts["nonconvex"] = solph.NonConvex() + min_load_opts["min"] = min_load[VALUE] + + max_load = dict_asset.get(SOC_MAX, None) + if max_load is not None: + if max_load[VALUE] != 1: + logging.warning( + f"Maximal load of {max_load[VALUE]} was set to asset {dict_asset[LABEL]}" + ) + min_load_opts["nonconvex"] = solph.NonConvex() + + min_load_opts["max"] = max_load[VALUE] + + if "nonconvex" in min_load_opts: + if invest_opts.get("maximum", None) is None: + raise ValueError( + f"You need to provide a maximum_capacity to the asset {dict_asset[LABEL]}, if you set a minimal/maximal load different from 0/1" + ) + if investment_bus is None: investment_bus = dict_asset[OUTFLOW_DIRECTION] @@ -826,6 +874,7 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs): kwargs[OEMOF_BUSSES][bus]: solph.Flow( investment=investment if bus == investment_bus else None, variable_costs=dict_asset[DISPATCH_PRICE][VALUE], + **min_load_opts, ) } @@ -842,7 +891,6 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs): outputs=outputs, conversion_factors=efficiencies, ) - model.add(t) kwargs[OEMOF_TRANSFORMER].update({dict_asset[LABEL]: t}) diff --git a/src/multi_vector_simulator/server.py b/src/multi_vector_simulator/server.py index 9ead03c78..595ae016f 100644 --- a/src/multi_vector_simulator/server.py +++ b/src/multi_vector_simulator/server.py @@ -160,7 +160,9 @@ def __init__(self, results, busses_info=None, asset_types=None): ts_index = pd.to_datetime(js["index"][:-1], unit="ms") investments = df.iloc[-1] ts_df.index = ts_index - + for extra_var in ["status", "status_nominal"]: + if extra_var in ts_df: + ts_df.drop(extra_var, axis=1, inplace=True) super().__init__( data=ts_df.T.to_dict(orient="split")["data"], index=mindex, From 63e644e7ab5cd27470ab11a9da3ac4b458abdeaa Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 22:12:50 +0200 Subject: [PATCH 03/18] Add costs to excess sinks of busses If the dictionnary containing the information about the bus contains a key "price", its value will be applied to the variable costs of the sink (unit of the price is currency/energy unit, default currency/kWh) --- src/multi_vector_simulator/C0_data_processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/multi_vector_simulator/C0_data_processing.py b/src/multi_vector_simulator/C0_data_processing.py index 38ded725a..6bcba2da4 100644 --- a/src/multi_vector_simulator/C0_data_processing.py +++ b/src/multi_vector_simulator/C0_data_processing.py @@ -290,10 +290,12 @@ def define_excess_sinks(dict_values): for bus in dict_values[ENERGY_BUSSES]: excess_sink_name = bus + EXCESS_SINK energy_vector = dict_values[ENERGY_BUSSES][bus][ENERGY_VECTOR] + # TODO make this official if needed + excess_price = dict_values[ENERGY_BUSSES][bus].get("price", 0) define_sink( dict_values=dict_values, asset_key=excess_sink_name, - price={VALUE: 0, UNIT: CURR + "/" + UNIT}, + price={VALUE: excess_price, UNIT: CURR + "/" + UNIT}, inflow_direction=bus, energy_vector=energy_vector, asset_type="excess", From dc0713a7afc43aa8a6d0e3086b0b1dc4d5b14419 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 22:25:39 +0200 Subject: [PATCH 04/18] Introduce reducable demand The reducable demand should be listed within sinks, and provided an efficiency (number between 0 and 1). 
This efficiency corresponds to the fraction of the demand which must be provided (critical demand). The oemof-solph sink which models the non-critical part of the demand has very small variable_costs, such that it should not influence the cost calculations but should be fulfilled rather than dumping energy into excess sinks. ---
 .../D1_model_components.py | 93 ++++++++++++++++++-
 .../E1_process_results.py | 20 +++-
 .../utils/constants_json_strings.py | 4 +
 src/multi_vector_simulator/utils/helpers.py | 28 +++++-
 4 files changed, 139 insertions(+), 6 deletions(-)

diff --git a/src/multi_vector_simulator/D1_model_components.py b/src/multi_vector_simulator/D1_model_components.py
index 11b6c6de7..07bbfedcd 100644
--- a/src/multi_vector_simulator/D1_model_components.py
+++ b/src/multi_vector_simulator/D1_model_components.py
@@ -47,6 +47,7 @@
     MAXIMUM_ADD_CAP,
     MAXIMUM_ADD_CAP_NORMALIZED,
     DISPATCHABILITY,
+    TYPE_ASSET,
     OEMOF_ASSET_TYPE,
     OEMOF_GEN_STORAGE,
     OEMOF_SINK,
@@ -57,8 +58,13 @@
     EMISSION_FACTOR,
     BETA,
     INVESTMENT_BUS,
+    REDUCABLE_DEMAND,
+)
+from multi_vector_simulator.utils.helpers import (
+    get_item_if_list,
+    get_length_if_list,
+    reducable_demand_name,
 )
-from multi_vector_simulator.utils.helpers import get_item_if_list, get_length_if_list
 from multi_vector_simulator.utils.exceptions import (
     MissingParameterError,
     WrongParameterFormatError,
@@ -354,7 +360,10 @@ def sink(model, dict_asset, **kwargs):
     """
     if TIMESERIES in dict_asset:
-        sink_non_dispatchable(model, dict_asset, **kwargs)
+        if dict_asset.get(TYPE_ASSET) == REDUCABLE_DEMAND:
+            sink_demand_reduction(model, dict_asset, **kwargs)
+        else:
+            sink_non_dispatchable(model, dict_asset, **kwargs)
     else:
         sink_dispatchable_optimize(model, dict_asset, **kwargs)
@@ -1360,6 +1369,86 @@ def sink_non_dispatchable(model, dict_asset, **kwargs):
     )
+def sink_demand_reduction(model, dict_asset, **kwargs):
+    r"""
+    Defines a non dispatchable sink to serve critical and non-critical demand.
+
+    See :py:func:`~.sink` for more information, including parameters.
+
+    Notes
+    -----
+    Tested with:
+    - test_sink_non_dispatchable_single_input_bus()
+    - test_sink_non_dispatchable_multiple_input_busses()
+
+    Returns
+    -------
+    Indirectly updated `model` and dict of asset in `kwargs` with the sink object.
+ + """ + demand_reduction_factor = 1 - dict_asset[EFFICIENCY][VALUE] + tot_demand = dict_asset[TIMESERIES] + non_critical_demand_ts = tot_demand * demand_reduction_factor + non_critical_demand_peak = non_critical_demand_ts.max() + if non_critical_demand_peak == 0: + max_non_critical = 1 + else: + max_non_critical = non_critical_demand_ts / non_critical_demand_peak + critical_demand_ts = tot_demand * dict_asset[EFFICIENCY][VALUE] + + # check if the sink has multiple input busses + if isinstance(dict_asset[INFLOW_DIRECTION], list): + raise ( + ValueError( + f"The reducable demand {dict_asset[LABEL]} does not support multiple input busses" + ) + ) + # inputs_noncritical = {} + # inputs_critical = {} + # index = 0 + # for bus in dict_asset[INFLOW_DIRECTION]: + # inputs_critical[kwargs[OEMOF_BUSSES][bus]] = solph.Flow( + # fix=dict_asset[TIMESERIES], nominal_value=1 + # ) + # index += 1 + else: + inputs_noncritical = { + kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION]]: solph.Flow( + min=0, + max=max_non_critical, + nominal_value=non_critical_demand_peak, + variable_costs=-1e-15, + ) + } + inputs_critical = { + kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION]]: solph.Flow( + fix=critical_demand_ts, nominal_value=1 + ) + } + + non_critical_demand = solph.components.Sink( + label=reducable_demand_name(dict_asset[LABEL]), inputs=inputs_noncritical, + ) + critical_demand = solph.components.Sink( + label=reducable_demand_name(dict_asset[LABEL], critical=True), + inputs=inputs_critical, + ) + + # create and add demand sink and critical demand sink + + model.add(critical_demand) + model.add(non_critical_demand) + kwargs[OEMOF_SINK].update( + {reducable_demand_name(dict_asset[LABEL]): non_critical_demand} + ) + kwargs[OEMOF_SINK].update( + {reducable_demand_name(dict_asset[LABEL], critical=True): critical_demand} + ) + logging.debug( + f"Added: Reducable Non-dispatchable sink {dict_asset[LABEL]} to bus {dict_asset[INFLOW_DIRECTION]}" + ) + + def chp_fix(model, dict_asset, **kwargs): r""" Extraction turbine chp from Oemof solph. Extraction turbine must have one input and two outputs diff --git a/src/multi_vector_simulator/E1_process_results.py b/src/multi_vector_simulator/E1_process_results.py index 4424aa186..2731bba7d 100644 --- a/src/multi_vector_simulator/E1_process_results.py +++ b/src/multi_vector_simulator/E1_process_results.py @@ -12,7 +12,7 @@ import logging import copy import pandas as pd - +from multi_vector_simulator.utils.helpers import reducable_demand_name from multi_vector_simulator.utils.constants import TYPE_NONE, TOTAL_FLOW from multi_vector_simulator.utils.constants_json_strings import ( ECONOMIC_DATA, @@ -70,6 +70,7 @@ FIX_COST, LIFETIME_PRICE_DISPATCH, AVERAGE_SOC, + TYPE_ASSET, ) # Oemof.solph variables @@ -734,8 +735,21 @@ def get_flow(settings, bus, dict_asset, flow_tuple, multi_bus=None): the flow ('average_flow'). 
""" - flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] - flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW) + + if dict_asset.get(TYPE_ASSET) == "reducable_demand": + flow_tuple = (flow_tuple[0], reducable_demand_name(dict_asset[LABEL])) + flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] + flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW) + flow_tuple = (flow_tuple[0], reducable_demand_name(dict_asset[LABEL], critical=True)) + + flow_crit = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] + flow_crit = cut_below_micro(flow_crit, dict_asset[LABEL] + FLOW) + flow = flow + flow_crit + + else: + flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] + flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW) + add_info_flows( evaluated_period=settings[EVALUATED_PERIOD][VALUE], dict_asset=dict_asset, diff --git a/src/multi_vector_simulator/utils/constants_json_strings.py b/src/multi_vector_simulator/utils/constants_json_strings.py index b925bd3fb..d668ecfe9 100644 --- a/src/multi_vector_simulator/utils/constants_json_strings.py +++ b/src/multi_vector_simulator/utils/constants_json_strings.py @@ -192,6 +192,10 @@ ) CONNECTED_FEEDIN_SINK = "connected_feedin_sink" +SUFFIX_CRITICAL = "critical" +SUFFIX_NONCRITICAL = "noncritical" +REDUCABLE_DEMAND = "reducable_demand" + # Autogenerated assets DISPATCHABILITY = "dispatchable" AVAILABILITY_DISPATCH = "availability_timeseries" diff --git a/src/multi_vector_simulator/utils/helpers.py b/src/multi_vector_simulator/utils/helpers.py index 7784991bb..c2832d81d 100644 --- a/src/multi_vector_simulator/utils/helpers.py +++ b/src/multi_vector_simulator/utils/helpers.py @@ -8,6 +8,7 @@ - find_valvue_by_key(): Finds value of a key in a nested dictionary. """ +from copy import deepcopy import os from multi_vector_simulator.utils.constants_json_strings import ( @@ -28,6 +29,9 @@ INFLOW_DIRECTION, OUTFLOW_DIRECTION, ENERGY_VECTOR, + SUFFIX_CRITICAL, + SUFFIX_NONCRITICAL, + REDUCABLE_DEMAND, ) @@ -120,6 +124,17 @@ def get_length_if_list(list_or_float): return answer +def reducable_demand_name(demand_name: str, critical: bool = False): + """Name for auto created bus related to peak demand pricing period""" + + if critical is False: + suffix = SUFFIX_NONCRITICAL + else: + suffix = SUFFIX_CRITICAL + + return f"{demand_name}_{suffix} {AUTO_CREATED_HIGHLIGHT}" + + def peak_demand_bus_name(dso_name: str, feedin: bool = False): """Name for auto created bus related to peak demand pricing period""" @@ -182,5 +197,16 @@ def get_asset_types(dict_values): for bus in input_bus + output_bus: asset_busses[bus] = dict_values[ENERGY_BUSSES][bus].get(ENERGY_VECTOR) asset_type["busses"] = asset_busses - asset_types.append(asset_type) + if asset_type[TYPE_ASSET] == REDUCABLE_DEMAND: + + asset_label = asset_type["label"] + asset_type["label"] = reducable_demand_name(asset_label) + asset_types.append(asset_type) + crit_asset_type = deepcopy(asset_type) + crit_asset_type["label"] = reducable_demand_name( + asset_label, critical=True + ) + asset_types.append(crit_asset_type) + else: + asset_types.append(asset_type) return asset_types From e54373075c2767a46ea9946db5cef8091eecbf43 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 22:26:40 +0200 Subject: [PATCH 05/18] Allow mvs server to set verbatim option --- src/multi_vector_simulator/server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/multi_vector_simulator/server.py b/src/multi_vector_simulator/server.py index 595ae016f..9d5a83bc6 100644 --- 
a/src/multi_vector_simulator/server.py
+++ b/src/multi_vector_simulator/server.py
@@ -191,7 +191,7 @@ def asset_optimized_capacity(self, asset_name):
         return optimized_capacity


-def run_simulation(json_dict, epa_format=True, **kwargs):
+def run_simulation(json_dict, epa_format=True, verbatim=False, **kwargs):
     r"""
     Starts MVS tool simulation from an input json file

@@ -311,7 +311,9 @@

     logging.debug("Convert results to json")
     if epa_format is True:
-        epa_dict_values = data_parser.convert_mvs_params_to_epa(dict_values)
+        epa_dict_values = data_parser.convert_mvs_params_to_epa(
+            dict_values, verbatim=verbatim
+        )

         json_values = F0.store_as_json(epa_dict_values)
         answer = json.loads(json_values)

From ee0566b294cb6dbf32d4df542fed68214cfef4a4 Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Sun, 28 Apr 2024 22:35:37 +0200
Subject: [PATCH 06/18] Update changelog

---
 CHANGELOG.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d049cd9f7..f395941e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,8 +23,13 @@ Here is a template for new release sections

 ### Added

+- Introduce reducable demand. It should be listed within sinks and provided with an efficiency (number between 0 and 1). This efficiency corresponds to the fraction of the demand which must be provided (critical demand). The oemof-solph sink which models the non-critical part of the demand has very small variable_costs, such that it should not influence the cost calculations but should be fulfilled rather than dumping energy into excess sinks. Developed for the server version. (#969)
+

 ### Changed

+- Add costs to excess sinks of busses. If the dictionary containing the information about the bus contains a key "price", its value will be applied to the variable costs of the sink (unit of the price is currency/energy unit, default currency/kWh). Developed for the server version.
(#969) + ### Fixed ## [1.1.0] - 2024-04-27 From eed226648449f7c7aeb0efa4c6a1226dd2513fb9 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 23:03:09 +0200 Subject: [PATCH 07/18] Add mandatory build-os parameter to RTD config Our previous settings are deprecated since September 25th 2023 https://blog.readthedocs.com/migrate-configuration-v2/ --- .readthedocs.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index b88965058..03effe78e 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -3,6 +3,13 @@ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.9" + # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py @@ -12,7 +19,6 @@ sphinx: # configuration: mkdocs.yml # Optionally set the version of Python and requirements required to build your docs python: - version: 3.7 install: - requirements: requirements/docs.txt - requirements: requirements/default.txt From c0f96c8d5f9b41ace908964ddf1c44338f3b3b49 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 23:11:33 +0200 Subject: [PATCH 08/18] Bump up black version --- requirements/test.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/test.txt b/requirements/test.txt index d662fc036..2de47e890 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,5 +1,5 @@ pytest>=5.3.1 -black==19.10b0 +black==24.3.0 coverage>=4.5 coveralls>=3.0.1 mock>=3.0.5 From fa83b34f521058de37a2dcb63afa7196d2c43862 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 23:12:27 +0200 Subject: [PATCH 09/18] Lint with black --- docs/conf.py | 56 +++++++++--- setup.py | 5 +- .../A0_initialization.py | 3 +- src/multi_vector_simulator/A1_csv_to_json.py | 7 +- .../B0_data_input_json.py | 4 +- .../C0_data_processing.py | 69 +++++++++++---- .../C2_economic_functions.py | 8 +- .../D0_modelling_and_optimization.py | 3 +- .../D1_model_components.py | 44 ++++++---- .../D2_model_constraints.py | 40 ++++++--- src/multi_vector_simulator/E0_evaluation.py | 9 +- .../E1_process_results.py | 8 +- src/multi_vector_simulator/E2_economics.py | 10 ++- .../E3_indicator_calculation.py | 5 +- src/multi_vector_simulator/F0_output.py | 1 - src/multi_vector_simulator/F1_plotting.py | 20 ++++- src/multi_vector_simulator/F2_autoreport.py | 63 +++++++++----- src/multi_vector_simulator/cli.py | 4 +- src/multi_vector_simulator/utils/__init__.py | 4 +- src/multi_vector_simulator/utils/constants.py | 86 +++++++++++++++---- .../utils/data_parser.py | 25 ++++-- tests/test_A0_initialization.py | 2 +- tests/test_A1_csv_to_json.py | 5 +- tests/test_B0_data_input_json.py | 31 ++++++- tests/test_C0_data_processing.py | 42 ++++++--- tests/test_C1_verification.py | 2 +- tests/test_D1_model_components.py | 45 ++++++---- tests/test_D2_model_constraints.py | 37 +++++--- tests/test_E1_process_results.py | 12 ++- tests/test_E2_economics.py | 10 ++- tests/test_E3_indicator_calculation.py | 7 +- tests/test_E4_verification.py | 27 ++++-- tests/test_F0_output.py | 12 ++- tests/test_benchmark_KPI.py | 10 ++- tests/test_benchmark_constraints.py | 1 + tests/test_benchmark_scenarios.py | 1 + tests/test_utils.py | 2 +- 37 files changed, 527 insertions(+), 193 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index dde627342..4817c33e3 100644 --- 
a/docs/conf.py +++ b/docs/conf.py @@ -75,7 +75,13 @@ def generate_parameter_description(input_csv_file, output_rst_file): see_also = [] lines = ( lines - + [f".. _{props.ref}:", "", props.label, "^" * len(props.label), "",] + + [ + f".. _{props.ref}:", + "", + props.label, + "^" * len(props.label), + "", + ] + [f"{p} {props[p]}" for p in parameter_properties] + [""] + [ @@ -83,7 +89,10 @@ def generate_parameter_description(input_csv_file, output_rst_file): + ", ".join([f":ref:`{cat}`" for cat in props.category.split(";")]) ] + see_also - + ["", "",] + + [ + "", + "", + ] ) with open(output_rst_file, "w") as ofs: @@ -139,11 +148,22 @@ def generate_parameter_categories( lines = ( lines - + [f".. _{props.ref}:", "", cat_label, "^" * len(cat_label), "",] + + [ + f".. _{props.ref}:", + "", + cat_label, + "^" * len(cat_label), + "", + ] + props.description.split("\\n") - + ["",] + + [ + "", + ] + [f"* :ref:`{p}`" for p in parameter_per_cat] - + ["", "",] + + [ + "", + "", + ] ) with open(output_rst_file, "w") as ofs: @@ -194,10 +214,17 @@ def generate_kpi_categories(input_param_csv_file, input_cat_csv_file, output_rst lines = ( lines - + [f"{props.description} These are the calculated {props.category} KPI:",] - + ["",] + + [ + f"{props.description} These are the calculated {props.category} KPI:", + ] + + [ + "", + ] + [f"* :ref:`{parameters[p]} <{p}>`" for p in parameter_per_cat] - + ["", "",] + + [ + "", + "", + ] ) with open(output_rst_file, "w") as ofs: @@ -290,10 +317,19 @@ def generate_kpi_description(input_csv_file, output_path): # Write lines based on definitions to an *.inc file lines = ( - [f".. _{props.ref}:", "", title, "^" * len(title), "",] + [ + f".. _{props.ref}:", + "", + title, + "^" * len(title), + "", + ] + [f"{p} {props[p]}" for p in parameter_properties] + [f":Related indicators: {see_also}"] - + ["", "",] + + [ + "", + "", + ] ) with open(os.path.join(output_path, props.ref + ".inc"), "w") as ofs: diff --git a/setup.py b/setup.py index fd250bd33..94f2018fd 100644 --- a/setup.py +++ b/setup.py @@ -138,7 +138,10 @@ def parse_requirements_file(filename): # called `my_module.py` to exist: # # py_modules=["my_module"], - packages=["multi_vector_simulator", "multi_vector_simulator.utils",], # Required + packages=[ + "multi_vector_simulator", + "multi_vector_simulator.utils", + ], # Required # Specify which Python versions you support. In contrast to the # 'Programming Language' classifiers above, 'pip install' will check this # and refuse to install the project if the version does not match. If you diff --git a/src/multi_vector_simulator/A0_initialization.py b/src/multi_vector_simulator/A0_initialization.py index f99745829..4f4aff83b 100644 --- a/src/multi_vector_simulator/A0_initialization.py +++ b/src/multi_vector_simulator/A0_initialization.py @@ -245,7 +245,8 @@ def report_arg_parser(): :return: parser """ parser = argparse.ArgumentParser( - prog="mvs_report", description="Display the report of a MVS simulation", + prog="mvs_report", + description="Display the report of a MVS simulation", ) parser.add_argument( "-pdf", diff --git a/src/multi_vector_simulator/A1_csv_to_json.py b/src/multi_vector_simulator/A1_csv_to_json.py index d192bc414..b76ce7f8c 100644 --- a/src/multi_vector_simulator/A1_csv_to_json.py +++ b/src/multi_vector_simulator/A1_csv_to_json.py @@ -97,7 +97,8 @@ def create_input_json( - input_directory, pass_back=True, + input_directory, + pass_back=True, ): """Convert csv files to json file as input for the simulation. 
@@ -515,7 +516,9 @@ def create_json_from_csv( df.loc[STORAGE_FILENAME][column] ) storage_dict = add_storage_components( - storage_file_name, input_directory, single_dict[column][LABEL], + storage_file_name, + input_directory, + single_dict[column][LABEL], ) single_dict[column].update(storage_dict) diff --git a/src/multi_vector_simulator/B0_data_input_json.py b/src/multi_vector_simulator/B0_data_input_json.py index 982d082b8..ea5b229a0 100644 --- a/src/multi_vector_simulator/B0_data_input_json.py +++ b/src/multi_vector_simulator/B0_data_input_json.py @@ -2,6 +2,7 @@ Module B0 - Data input json =========================== """ + import logging import copy import json @@ -321,7 +322,8 @@ def load_json( os.replace( path_input_file, os.path.join( - dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER_INPUTS], CSV_FNAME, + dict_values[SIMULATION_SETTINGS][PATH_OUTPUT_FOLDER_INPUTS], + CSV_FNAME, ), ) diff --git a/src/multi_vector_simulator/C0_data_processing.py b/src/multi_vector_simulator/C0_data_processing.py index 6bcba2da4..f66ae826d 100644 --- a/src/multi_vector_simulator/C0_data_processing.py +++ b/src/multi_vector_simulator/C0_data_processing.py @@ -334,7 +334,9 @@ def energyConversion(dict_values, group): and HEADER in dict_values[group][asset][EFFICIENCY] ): receive_timeseries_from_csv( - dict_values[SIMULATION_SETTINGS], dict_values[group][asset], EFFICIENCY, + dict_values[SIMULATION_SETTINGS], + dict_values[group][asset], + EFFICIENCY, ) # in case there is more than one parameter provided (either (A) n input busses and 1 output bus or (B) 1 input bus and n output busses) # dictionaries with filenames and headers will be replaced by timeseries, scalars will be mantained @@ -398,7 +400,8 @@ def energyStorage(dict_values, group): for asset in dict_values[group]: for subasset in [STORAGE_CAPACITY, INPUT_POWER, OUTPUT_POWER]: define_missing_cost_data( - dict_values, dict_values[group][asset][subasset], + dict_values, + dict_values[group][asset][subasset], ) evaluate_lifetime_costs( dict_values[SIMULATION_SETTINGS], @@ -504,7 +507,9 @@ def define_missing_cost_data(dict_values, dict_asset): if DISPATCH_PRICE in dict_asset: if isinstance(dict_asset[DISPATCH_PRICE][VALUE], dict): receive_timeseries_from_csv( - dict_values[SIMULATION_SETTINGS], dict_asset, DISPATCH_PRICE, + dict_values[SIMULATION_SETTINGS], + dict_asset, + DISPATCH_PRICE, ) elif isinstance(dict_asset[DISPATCH_PRICE][VALUE], list): treat_multiple_flows(dict_asset, dict_values, DISPATCH_PRICE) @@ -519,8 +524,14 @@ def define_missing_cost_data(dict_values, dict_asset): SPECIFIC_COSTS: {VALUE: 0, UNIT: CURR + "/" + UNIT}, SPECIFIC_COSTS_OM: {VALUE: 0, UNIT: CURR + "/" + UNIT_YEAR}, DISPATCH_PRICE: {VALUE: 0, UNIT: CURR + "/" + UNIT + "/" + UNIT_YEAR}, - LIFETIME: {VALUE: economic_data[PROJECT_DURATION][VALUE], UNIT: UNIT_YEAR,}, - AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR,}, + LIFETIME: { + VALUE: economic_data[PROJECT_DURATION][VALUE], + UNIT: UNIT_YEAR, + }, + AGE_INSTALLED: { + VALUE: 0, + UNIT: UNIT_YEAR, + }, } # checks that an asset has all cost parameters needed for evaluation. 
@@ -722,11 +733,17 @@ def define_auxiliary_assets_of_energy_providers(dict_values, dso_name): ) dict_availability_timeseries = define_availability_of_peak_demand_pricing_assets( - dict_values, number_of_pricing_periods, months_in_a_period, + dict_values, + number_of_pricing_periods, + months_in_a_period, ) - list_of_dso_energyConversion_assets = add_a_transformer_for_each_peak_demand_pricing_period( - dict_values, dso_dict, dict_availability_timeseries, + list_of_dso_energyConversion_assets = ( + add_a_transformer_for_each_peak_demand_pricing_period( + dict_values, + dso_dict, + dict_availability_timeseries, + ) ) define_source( @@ -1025,7 +1042,10 @@ def define_transformer_for_peak_demand_pricing( AVAILABILITY_DISPATCH: timeseries_availability, EFFICIENCY: {VALUE: 1, UNIT: "factor"}, DEVELOPMENT_COSTS: {VALUE: 0, UNIT: CURR}, - SPECIFIC_COSTS: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT],}, + SPECIFIC_COSTS: { + VALUE: 0, + UNIT: CURR + "/" + dict_dso[UNIT], + }, # the demand pricing is only applied to consumption SPECIFIC_COSTS_OM: { VALUE: dict_dso[PEAK_DEMAND_PRICING][VALUE], @@ -1058,7 +1078,10 @@ def define_transformer_for_peak_demand_pricing( AVAILABILITY_DISPATCH: timeseries_availability, EFFICIENCY: {VALUE: 1, UNIT: "factor"}, DEVELOPMENT_COSTS: {VALUE: 0, UNIT: CURR}, - SPECIFIC_COSTS: {VALUE: 0, UNIT: CURR + "/" + dict_dso[UNIT],}, + SPECIFIC_COSTS: { + VALUE: 0, + UNIT: CURR + "/" + dict_dso[UNIT], + }, # the demand pricing is only applied to consumption SPECIFIC_COSTS_OM: { VALUE: 0, @@ -1068,7 +1091,7 @@ def define_transformer_for_peak_demand_pricing( OEMOF_ASSET_TYPE: OEMOF_TRANSFORMER, ENERGY_VECTOR: dict_dso[ENERGY_VECTOR], AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR}, - TYPE_ASSET: dict_dso.get(TYPE_ASSET) + TYPE_ASSET: dict_dso.get(TYPE_ASSET), # LIFETIME: {VALUE: 100, UNIT: UNIT_YEAR}, } if dict_dso.get(DSO_FEEDIN_CAP, None) is not None: @@ -1150,7 +1173,10 @@ def define_source( }, OPTIMIZE_CAP: {VALUE: True, UNIT: TYPE_BOOL}, MAXIMUM_CAP: {VALUE: None, UNIT: "?"}, - AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR,}, + AGE_INSTALLED: { + VALUE: 0, + UNIT: UNIT_YEAR, + }, ENERGY_VECTOR: energy_vector, EMISSION_FACTOR: emission_factor, TYPE_ASSET: asset_type, @@ -1334,7 +1360,10 @@ def define_sink( VALUE: dict_values[ECONOMIC_DATA][PROJECT_DURATION][VALUE], UNIT: UNIT_YEAR, }, - AGE_INSTALLED: {VALUE: 0, UNIT: UNIT_YEAR,}, + AGE_INSTALLED: { + VALUE: 0, + UNIT: UNIT_YEAR, + }, ENERGY_VECTOR: energy_vector, OPTIMIZE_CAP: {VALUE: True, UNIT: TYPE_BOOL}, DISPATCHABILITY: {VALUE: True, UNIT: TYPE_BOOL}, @@ -1408,7 +1437,9 @@ def define_sink( for item in [SPECIFIC_COSTS, SPECIFIC_COSTS_OM]: if item in kwargs: sink.update( - {item: kwargs[item],} + { + item: kwargs[item], + } ) # update dictionary @@ -1751,8 +1782,14 @@ def compute_timeseries_properties(dict_asset): dict_asset.update( { - TIMESERIES_PEAK: {VALUE: max(timeseries), UNIT: unit,}, - TIMESERIES_TOTAL: {VALUE: sum(timeseries), UNIT: unit,}, + TIMESERIES_PEAK: { + VALUE: max(timeseries), + UNIT: unit, + }, + TIMESERIES_TOTAL: { + VALUE: sum(timeseries), + UNIT: unit, + }, TIMESERIES_AVERAGE: { VALUE: sum(timeseries) / len(timeseries), UNIT: unit, diff --git a/src/multi_vector_simulator/C2_economic_functions.py b/src/multi_vector_simulator/C2_economic_functions.py index 8f25b60e1..f0ebfbe07 100644 --- a/src/multi_vector_simulator/C2_economic_functions.py +++ b/src/multi_vector_simulator/C2_economic_functions.py @@ -13,6 +13,7 @@ - calculate present costs based on annuity - calculate effective fuel price cost, in case there is 
a annual fuel price change (this functionality still has to be checked in this module) """ + import logging import pandas as pd @@ -26,6 +27,7 @@ LIFETIME_PRICE_DISPATCH, ) + # annuity factor to calculate present value of cash flows def annuity_factor(project_life, discount_factor): r""" @@ -258,9 +260,9 @@ def get_replacement_costs( # Subtraction of component value at end of life with last replacement (= number_of_investments - 1) replacement_costs -= value_at_project_end # Update cash flow projection (specific) - present_value_of_capital_expenditures.loc[ - project_lifetime - ] = -value_at_project_end + present_value_of_capital_expenditures.loc[project_lifetime] = ( + -value_at_project_end + ) return replacement_costs diff --git a/src/multi_vector_simulator/D0_modelling_and_optimization.py b/src/multi_vector_simulator/D0_modelling_and_optimization.py index e4da51b6e..5eda20fbc 100644 --- a/src/multi_vector_simulator/D0_modelling_and_optimization.py +++ b/src/multi_vector_simulator/D0_modelling_and_optimization.py @@ -313,7 +313,8 @@ def store_lp_file(dict_values, local_energy_system): ) logging.debug("Saving to lp-file.") local_energy_system.write( - path_lp_file, io_options={"symbolic_solver_labels": True}, + path_lp_file, + io_options={"symbolic_solver_labels": True}, ) def simulating(dict_values, model, local_energy_system): diff --git a/src/multi_vector_simulator/D1_model_components.py b/src/multi_vector_simulator/D1_model_components.py index 07bbfedcd..26910e852 100644 --- a/src/multi_vector_simulator/D1_model_components.py +++ b/src/multi_vector_simulator/D1_model_components.py @@ -597,9 +597,9 @@ def transformer_constant_efficiency_fix(model, dict_asset, **kwargs): } efficiencies = {} for i, efficiency in enumerate(dict_asset[EFFICIENCY][VALUE]): - efficiencies[ - kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION][i]] - ] = efficiency + efficiencies[kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION][i]]] = ( + efficiency + ) elif isinstance(dict_asset[INFLOW_DIRECTION], str) and isinstance( dict_asset[OUTFLOW_DIRECTION], list @@ -631,9 +631,9 @@ def transformer_constant_efficiency_fix(model, dict_asset, **kwargs): efficiencies = {} for i, efficiency in enumerate(dict_asset[EFFICIENCY][VALUE]): - efficiencies[ - kwargs[OEMOF_BUSSES][dict_asset[OUTFLOW_DIRECTION][i]] - ] = efficiency + efficiencies[kwargs[OEMOF_BUSSES][dict_asset[OUTFLOW_DIRECTION][i]]] = ( + efficiency + ) else: # multiple inputs and multiple outputs @@ -772,9 +772,9 @@ def transformer_constant_efficiency_optimize(model, dict_asset, **kwargs): efficiencies = {} for i, efficiency in enumerate(dict_asset[EFFICIENCY][VALUE]): - efficiencies[ - kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION][i]] - ] = efficiency + efficiencies[kwargs[OEMOF_BUSSES][dict_asset[INFLOW_DIRECTION][i]]] = ( + efficiency + ) elif isinstance(dict_asset[INFLOW_DIRECTION], str) and isinstance( dict_asset[OUTFLOW_DIRECTION], list @@ -1053,7 +1053,7 @@ def storage_optimize(model, dict_asset, **kwargs): ], # efficiency of discharge invest_relation_input_capacity=dict_asset[INPUT_POWER][C_RATE][VALUE], # storage can be charged with invest_relation_output_capacity*capacity in one timeperiod - invest_relation_output_capacity=dict_asset[OUTPUT_POWER][C_RATE][VALUE] + invest_relation_output_capacity=dict_asset[OUTPUT_POWER][C_RATE][VALUE], # storage can be emptied with invest_relation_output_capacity*capacity in one timeperiod ) model.add(storage) @@ -1185,7 +1185,8 @@ def source_dispatchable_optimize(model, dict_asset, **kwargs): ) } 
source_dispatchable = solph.components.Source( - label=dict_asset[LABEL], outputs=outputs, + label=dict_asset[LABEL], + outputs=outputs, ) else: if TIMESERIES in dict_asset: @@ -1252,7 +1253,8 @@ def source_dispatchable_fix(model, dict_asset, **kwargs): ) } source_dispatchable = solph.components.Source( - label=dict_asset[LABEL], outputs=outputs, + label=dict_asset[LABEL], + outputs=outputs, ) else: if TIMESERIES in dict_asset: @@ -1271,7 +1273,8 @@ def source_dispatchable_fix(model, dict_asset, **kwargs): ) } source_dispatchable = solph.components.Source( - label=dict_asset[LABEL], outputs=outputs, + label=dict_asset[LABEL], + outputs=outputs, ) model.add(source_dispatchable) kwargs[OEMOF_SOURCE].update({dict_asset[LABEL]: source_dispatchable}) @@ -1319,7 +1322,10 @@ def sink_dispatchable_optimize(model, dict_asset, **kwargs): } # create and add excess electricity sink to micro_grid_system - variable - sink_dispatchable = solph.components.Sink(label=dict_asset[LABEL], inputs=inputs,) + sink_dispatchable = solph.components.Sink( + label=dict_asset[LABEL], + inputs=inputs, + ) model.add(sink_dispatchable) kwargs[OEMOF_SINK].update({dict_asset[LABEL]: sink_dispatchable}) logging.debug( @@ -1361,7 +1367,10 @@ def sink_non_dispatchable(model, dict_asset, **kwargs): } # create and add demand sink to micro_grid_system - fixed - sink_demand = solph.components.Sink(label=dict_asset[LABEL], inputs=inputs,) + sink_demand = solph.components.Sink( + label=dict_asset[LABEL], + inputs=inputs, + ) model.add(sink_demand) kwargs[OEMOF_SINK].update({dict_asset[LABEL]: sink_demand}) logging.debug( @@ -1427,7 +1436,8 @@ def sink_demand_reduction(model, dict_asset, **kwargs): } non_critical_demand = solph.components.Sink( - label=reducable_demand_name(dict_asset[LABEL]), inputs=inputs_noncritical, + label=reducable_demand_name(dict_asset[LABEL]), + inputs=inputs_noncritical, ) critical_demand = solph.components.Sink( label=reducable_demand_name(dict_asset[LABEL], critical=True), @@ -1515,7 +1525,7 @@ def chp_optimize(model, dict_asset, **kwargs): ----- Tested with: - test_to_be_written() - + Returns ------- Indirectly updated `model` and dict of asset in `kwargs` with the extraction turbine component. diff --git a/src/multi_vector_simulator/D2_model_constraints.py b/src/multi_vector_simulator/D2_model_constraints.py index 9ea7d47e0..df656bdf0 100644 --- a/src/multi_vector_simulator/D2_model_constraints.py +++ b/src/multi_vector_simulator/D2_model_constraints.py @@ -9,6 +9,7 @@ constraints should be tested in-code (examples) and by comparing the lp file generated. 
""" + import logging import pyomo.environ as po from oemof.solph import constraints @@ -178,7 +179,10 @@ def constraint_minimal_renewable_share(model, dict_values, dict_model): ( renewable_assets, non_renewable_assets, - ) = prepare_constraint_minimal_renewable_share(dict_values, dict_model,) + ) = prepare_constraint_minimal_renewable_share( + dict_values, + dict_model, + ) def renewable_share_rule(model): renewable_generation = 0 @@ -237,7 +241,8 @@ def renewable_share_rule(model): def prepare_constraint_minimal_renewable_share( - dict_values, dict_model, + dict_values, + dict_model, ): r""" Prepare creating the minimal renewable factor constraint by processing dict_values @@ -429,10 +434,16 @@ def constraint_minimal_degree_of_autonomy(model, dict_values, dict_model): if dict_values[CONSTRAINTS][MINIMAL_DEGREE_OF_AUTONOMY][VALUE] > 0: - demands = prepare_demand_assets(dict_values, dict_model,) + demands = prepare_demand_assets( + dict_values, + dict_model, + ) - energy_provider_consumption_sources = prepare_energy_provider_consumption_sources( - dict_values, dict_model, + energy_provider_consumption_sources = ( + prepare_energy_provider_consumption_sources( + dict_values, + dict_model, + ) ) def degree_of_autonomy_rule(model): @@ -494,7 +505,8 @@ def degree_of_autonomy_rule(model): def prepare_demand_assets( - dict_values, dict_model, + dict_values, + dict_model, ): r""" Perpare demand assets by processing `dict_values` @@ -557,7 +569,8 @@ def prepare_demand_assets( def prepare_energy_provider_consumption_sources( - dict_values, dict_model, + dict_values, + dict_model, ): r""" Prepare energy provider consumption sources by processing `dict_values`. @@ -624,7 +637,8 @@ def prepare_energy_provider_consumption_sources( def prepare_energy_provider_feedin_sinks( - dict_values, dict_model, + dict_values, + dict_model, ): r""" Prepare energy provider feedin sinks by processing `dict_values`. 
@@ -719,11 +733,15 @@ def constraint_net_zero_energy(model, dict_values, dict_model): if dict_values[CONSTRAINTS][NET_ZERO_ENERGY][VALUE] == True: energy_provider_feedin_sinks = prepare_energy_provider_feedin_sinks( - dict_values, dict_model, + dict_values, + dict_model, ) - energy_provider_consumption_sources = prepare_energy_provider_consumption_sources( - dict_values, dict_model, + energy_provider_consumption_sources = ( + prepare_energy_provider_consumption_sources( + dict_values, + dict_model, + ) ) def net_zero_energy(model): diff --git a/src/multi_vector_simulator/E0_evaluation.py b/src/multi_vector_simulator/E0_evaluation.py index e4a086975..814080eb6 100644 --- a/src/multi_vector_simulator/E0_evaluation.py +++ b/src/multi_vector_simulator/E0_evaluation.py @@ -109,7 +109,8 @@ def evaluate_dict(dict_values, results_main, results_meta): bus_data.update( { dict_values[ENERGY_STORAGE][storage][LABEL]: solph.views.node( - results_main, dict_values[ENERGY_STORAGE][storage][LABEL], + results_main, + dict_values[ENERGY_STORAGE][storage][LABEL], ) } ) @@ -160,9 +161,9 @@ def evaluate_dict(dict_values, results_main, results_meta): + f") {SOC}" ) - dict_values[OPTIMIZED_FLOWS][inflow_direction][ - timeseries_name - ] = dict_values[ENERGY_STORAGE][storage][TIMESERIES_SOC] + dict_values[OPTIMIZED_FLOWS][inflow_direction][timeseries_name] = ( + dict_values[ENERGY_STORAGE][storage][TIMESERIES_SOC] + ) for group in [ENERGY_CONVERSION, ENERGY_PRODUCTION, ENERGY_CONSUMPTION]: for asset in dict_values[group]: diff --git a/src/multi_vector_simulator/E1_process_results.py b/src/multi_vector_simulator/E1_process_results.py index 2731bba7d..f55ad746f 100644 --- a/src/multi_vector_simulator/E1_process_results.py +++ b/src/multi_vector_simulator/E1_process_results.py @@ -9,6 +9,7 @@ - add the evaluation of time series """ + import logging import copy import pandas as pd @@ -740,7 +741,10 @@ def get_flow(settings, bus, dict_asset, flow_tuple, multi_bus=None): flow_tuple = (flow_tuple[0], reducable_demand_name(dict_asset[LABEL])) flow = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] flow = cut_below_micro(flow, dict_asset[LABEL] + FLOW) - flow_tuple = (flow_tuple[0], reducable_demand_name(dict_asset[LABEL], critical=True)) + flow_tuple = ( + flow_tuple[0], + reducable_demand_name(dict_asset[LABEL], critical=True), + ) flow_crit = bus[OEMOF_SEQUENCES][(flow_tuple, OEMOF_FLOW)] flow_crit = cut_below_micro(flow_crit, dict_asset[LABEL] + FLOW) @@ -994,7 +998,7 @@ def convert_components_to_dataframe(dict_values): components_list = [keys_production, keys_conversion] # Defining the columns of the table and filling them up with the appropriate data - for (component_key, comp_dict) in zip(components_list, comp_dict_list): + for component_key, comp_dict in zip(components_list, comp_dict_list): for comps in component_key: # Define whether optimization takes place optimize = translate_optimizeCap_from_boolean_to_yes_no( diff --git a/src/multi_vector_simulator/E2_economics.py b/src/multi_vector_simulator/E2_economics.py index 236a4b50d..1ecac5732 100644 --- a/src/multi_vector_simulator/E2_economics.py +++ b/src/multi_vector_simulator/E2_economics.py @@ -169,10 +169,12 @@ def get_costs(dict_asset, economic_data): ) # Operation and management expenditures over the project lifetime - operation_and_management_expenditures = calculate_operation_and_management_expenditures( - specific_om_cost=dict_asset[LIFETIME_SPECIFIC_COST_OM][VALUE], - installed_capacity=dict_asset[INSTALLED_CAP][VALUE], - 
optimized_add_capacity=dict_asset[OPTIMIZED_ADD_CAP][VALUE], + operation_and_management_expenditures = ( + calculate_operation_and_management_expenditures( + specific_om_cost=dict_asset[LIFETIME_SPECIFIC_COST_OM][VALUE], + installed_capacity=dict_asset[INSTALLED_CAP][VALUE], + optimized_add_capacity=dict_asset[OPTIMIZED_ADD_CAP][VALUE], + ) ) dict_asset.update( { diff --git a/src/multi_vector_simulator/E3_indicator_calculation.py b/src/multi_vector_simulator/E3_indicator_calculation.py index 5cebf9d79..a28d87cc3 100644 --- a/src/multi_vector_simulator/E3_indicator_calculation.py +++ b/src/multi_vector_simulator/E3_indicator_calculation.py @@ -13,6 +13,7 @@ - calculate onsite energy fraction (OEF) - calculate onsite energy matching (OEM) """ + import logging from multi_vector_simulator.utils.constants import DEFAULT_WEIGHTS_ENERGY_CARRIERS @@ -861,7 +862,7 @@ def add_onsite_energy_fraction(dict_values): dict_values: dict dict with all project information and results after applying total_renewable_and_non_renewable_energy_origin - + Returns ------- None @@ -942,7 +943,7 @@ def add_onsite_energy_matching(dict_values): after applying total_renewable_and_non_renewable_energy_origin and total_demand_and_excess_each_sector and add_onsite_energy_fraction - + Returns ------- None diff --git a/src/multi_vector_simulator/F0_output.py b/src/multi_vector_simulator/F0_output.py index 93b35ef73..cf456fe8d 100644 --- a/src/multi_vector_simulator/F0_output.py +++ b/src/multi_vector_simulator/F0_output.py @@ -13,7 +13,6 @@ - Store dictionary to Json """ - import json import logging import os diff --git a/src/multi_vector_simulator/F1_plotting.py b/src/multi_vector_simulator/F1_plotting.py index 545054166..083978e41 100644 --- a/src/multi_vector_simulator/F1_plotting.py +++ b/src/multi_vector_simulator/F1_plotting.py @@ -861,7 +861,8 @@ def create_plotly_barplot_fig( def plot_optimized_capacities( - dict_values, file_path=None, + dict_values, + file_path=None, ): """Plot capacities as a bar chart. 
@@ -882,7 +883,8 @@ def plot_optimized_capacities( # Add dataframe to hold all the KPIs and optimized additional capacities df_capacities = dict_values[KPI][KPI_SCALAR_MATRIX].copy(deep=True) df_capacities.drop( - columns=[TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW], inplace=True, + columns=[TOTAL_FLOW, ANNUAL_TOTAL_FLOW, PEAK_FLOW, AVERAGE_FLOW], + inplace=True, ) df_capacities.reset_index(drop=True, inplace=True) @@ -1000,7 +1002,11 @@ def create_plotly_flow_fig( "xanchor": "center", "yanchor": "top", }, - legend=dict(y=0.5, traceorder="normal", font=dict(color="black"),), + legend=dict( + y=0.5, + traceorder="normal", + font=dict(color="black"), + ), ) if file_path is not None: @@ -1168,7 +1174,13 @@ def create_plotly_piechart_fig( height=500, width=700, autosize=True, - legend=dict(orientation="v", y=0.5, yanchor="middle", x=0.95, xanchor="right",), + legend=dict( + orientation="v", + y=0.5, + yanchor="middle", + x=0.95, + xanchor="right", + ), margin=dict(l=10, r=10, b=50, pad=2), uniformtext_minsize=18, ) diff --git a/src/multi_vector_simulator/F2_autoreport.py b/src/multi_vector_simulator/F2_autoreport.py index ff5bed05f..92463573b 100644 --- a/src/multi_vector_simulator/F2_autoreport.py +++ b/src/multi_vector_simulator/F2_autoreport.py @@ -120,7 +120,10 @@ async def _print_pdf_from_chrome(path_pdf_report): "http://127.0.0.1:8050", {"waitUntil": "domcontentloaded", "timeout": 120000} ) await page.waitForSelector( - ".dash-cell", {"visible": "true",}, + ".dash-cell", + { + "visible": "true", + }, ) await page.pdf({"path": path_pdf_report, "format": "A4", "printBackground": True}) await browser.close() @@ -359,7 +362,9 @@ def insert_log_messages(log_dict): def insert_plotly_figure( - fig, id_plot=None, print_only=False, + fig, + id_plot=None, + print_only=False, ): r""" Insert a plotly figure in a dash app layout @@ -406,7 +411,12 @@ def insert_plotly_figure( # Dynamic plotly figure for the app if print_only is False: rendered_plots.append( - dcc.Graph(className="no-print", id=id_plot, figure=fig, responsive=True,) + dcc.Graph( + className="no-print", + id=id_plot, + figure=fig, + responsive=True, + ) ) return html.Div(children=rendered_plots) @@ -867,15 +877,17 @@ def create_app(results_json, path_sim_output=None): ), html.Div( className="cell imp_info2", - children=[] - if scenario_description == "" - else [ - html.Span( - "Scenario description : ", - style={"font-weight": "bold"}, - ), - f"{scenario_description}", - ], + children=( + [] + if scenario_description == "" + else [ + html.Span( + "Scenario description : ", + style={"font-weight": "bold"}, + ), + f"{scenario_description}", + ] + ), ), html.Div( className="blockoftext", @@ -921,7 +933,8 @@ def create_app(results_json, path_sim_output=None): html.H4(["Project Location"]), html.Iframe( srcDoc=open( - leaflet_map_path, "r", + leaflet_map_path, + "r", ).read(), height="400", ), @@ -934,10 +947,12 @@ def create_app(results_json, path_sim_output=None): ), html.Img( id="staticmapimage", - src="" - if MAP_STATIC is None - else "data:image/png;base64,{}".format( - MAP_STATIC.decode() + src=( + "" + if MAP_STATIC is None + else "data:image/png;base64,{}".format( + MAP_STATIC.decode() + ) ), width="400px", style={"marginLeft": "30px"}, @@ -1011,7 +1026,9 @@ def create_app(results_json, path_sim_output=None): "With this, the demands are met with the following dispatch schedules:" ), html.Div( - children=ready_flows_plots(dict_values=results_json,) + children=ready_flows_plots( + dict_values=results_json, + ) ), html.Div( 
className="add-cap-plot", @@ -1028,7 +1045,8 @@ def create_app(results_json, path_sim_output=None): insert_subsection( title="Sankey diagram", content=ready_sankey_diagram( - dict_values=results_json, only_print=False, + dict_values=results_json, + only_print=False, ), ), insert_subsection( @@ -1042,7 +1060,8 @@ def create_app(results_json, path_sim_output=None): html.Div( className="add-pie-plots", children=ready_costs_pie_plots( - dict_values=results_json, only_print=False, + dict_values=results_json, + only_print=False, ), ), ], @@ -1065,7 +1084,9 @@ def create_app(results_json, path_sim_output=None): children=[ html.Div( className="cell", - children=[insert_headings(heading_text="Logging Messages"),], + children=[ + insert_headings(heading_text="Logging Messages"), + ], ), html.Div( children=[ diff --git a/src/multi_vector_simulator/cli.py b/src/multi_vector_simulator/cli.py index 340e1808c..793df9f20 100644 --- a/src/multi_vector_simulator/cli.py +++ b/src/multi_vector_simulator/cli.py @@ -172,7 +172,8 @@ def main(**kwargs): print("") logging.debug("Accessing script: D0_modelling_and_optimization") results_meta, results_main = D0.run_oemof( - dict_values, save_energy_system_graph=save_energy_system_graph, + dict_values, + save_energy_system_graph=save_energy_system_graph, ) print("") @@ -189,7 +190,6 @@ def main(**kwargs): def report(pdf=None, path_simulation_output_json=None, path_pdf_report=None): - """Display the report of a MVS simulation Command line use: diff --git a/src/multi_vector_simulator/utils/__init__.py b/src/multi_vector_simulator/utils/__init__.py index dc374ba76..38ed770d9 100644 --- a/src/multi_vector_simulator/utils/__init__.py +++ b/src/multi_vector_simulator/utils/__init__.py @@ -45,7 +45,9 @@ class ParameterDocumentation: """Helper to access a parameter's information given its variable name""" def __init__( - self, param_info_file, label_header="label", + self, + param_info_file, + label_header="label", ): self.param_doc = pd.read_csv(param_info_file).set_index(label_header) self.label_hdr = label_header diff --git a/src/multi_vector_simulator/utils/constants.py b/src/multi_vector_simulator/utils/constants.py index 086ae026f..a44b9bf5d 100644 --- a/src/multi_vector_simulator/utils/constants.py +++ b/src/multi_vector_simulator/utils/constants.py @@ -2,6 +2,7 @@ General Constants ================= """ + import os from copy import deepcopy @@ -163,7 +164,12 @@ LIFETIME, SPECIFIC_COSTS_OM, ], - SIMULATION_SETTINGS: [EVALUATED_PERIOD, OUTPUT_LP_FILE, START_DATE, TIMESTEP,], + SIMULATION_SETTINGS: [ + EVALUATED_PERIOD, + OUTPUT_LP_FILE, + START_DATE, + TIMESTEP, + ], PROJECT_DATA: [ COUNTRY, LATITUDE, @@ -174,7 +180,12 @@ SCENARIO_NAME, SCENARIO_DESCRIPTION, ], - ECONOMIC_DATA: [CURR, DISCOUNTFACTOR, PROJECT_DURATION, TAX,], + ECONOMIC_DATA: [ + CURR, + DISCOUNTFACTOR, + PROJECT_DURATION, + TAX, + ], } # list of csv filename which must be present within the CSV_ELEMENTS folder @@ -261,19 +272,26 @@ DEFAULT_VALUE: 0, UNIT: TYPE_FLOAT, WARNING_TEXT: "allows calculating the total emissions of the energy system (Values: Float). ", - REQUIRED_IN_CSV_ELEMENTS: [ENERGY_PRODUCTION, ENERGY_PROVIDERS,], + REQUIRED_IN_CSV_ELEMENTS: [ + ENERGY_PRODUCTION, + ENERGY_PROVIDERS, + ], }, MAXIMUM_EMISSIONS: { DEFAULT_VALUE: DEFAULT_CONSTRAINT_VALUES[MAXIMUM_EMISSIONS][VALUE], UNIT: TYPE_NONE, WARNING_TEXT: "allows setting a maximum amount of emissions of the optimized energy system (Values: None/Float). 
", - REQUIRED_IN_CSV_ELEMENTS: [CONSTRAINTS,], + REQUIRED_IN_CSV_ELEMENTS: [ + CONSTRAINTS, + ], }, MINIMAL_DEGREE_OF_AUTONOMY: { DEFAULT_VALUE: DEFAULT_CONSTRAINT_VALUES[MINIMAL_DEGREE_OF_AUTONOMY][VALUE], UNIT: TYPE_FLOAT, WARNING_TEXT: "allows setting a minimum degree of autonomy of the optimized energy system (Values: Float). ", - REQUIRED_IN_CSV_ELEMENTS: [CONSTRAINTS,], + REQUIRED_IN_CSV_ELEMENTS: [ + CONSTRAINTS, + ], }, SCENARIO_DESCRIPTION: { DEFAULT_VALUE: "", @@ -285,31 +303,69 @@ DEFAULT_VALUE: DEFAULT_CONSTRAINT_VALUES[NET_ZERO_ENERGY][VALUE], UNIT: TYPE_BOOL, WARNING_TEXT: "allows to add a net zero energy constraint to optimization problem (activate by setting to `True`). ", - REQUIRED_IN_CSV_ELEMENTS: [CONSTRAINTS,], + REQUIRED_IN_CSV_ELEMENTS: [ + CONSTRAINTS, + ], }, } ENERGY_CARRIER_UNIT = "energy_carrier_unit" DEFAULT_WEIGHTS_ENERGY_CARRIERS = { - "LNG": {UNIT: "kWh_eleq/kg", VALUE: 12.69270292, ENERGY_CARRIER_UNIT: "kg",}, - "Crude_oil": {UNIT: "kWh_eleq/kg", VALUE: 11.63042204, ENERGY_CARRIER_UNIT: "kg",}, + "LNG": { + UNIT: "kWh_eleq/kg", + VALUE: 12.69270292, + ENERGY_CARRIER_UNIT: "kg", + }, + "Crude_oil": { + UNIT: "kWh_eleq/kg", + VALUE: 11.63042204, + ENERGY_CARRIER_UNIT: "kg", + }, "Diesel": { UNIT: "kWh_eleq/l", VALUE: 9.48030688, ENERGY_CARRIER_UNIT: "l", }, # https://epact.energy.gov/fuel-conversion-factors, conversion gallon->4.546092 l - "Kerosene": {UNIT: "kWh_eleq/l", VALUE: 8.908073954, ENERGY_CARRIER_UNIT: "l",}, - "Gasoline": {UNIT: "kWh_eleq/l", VALUE: 8.735753974, ENERGY_CARRIER_UNIT: "l",}, - "LPG": {UNIT: "kWh_eleq/l", VALUE: 6.472821609, ENERGY_CARRIER_UNIT: "l",}, - "Ethane": {UNIT: "kWh_eleq/l", VALUE: 5.149767951, ENERGY_CARRIER_UNIT: "l",}, + "Kerosene": { + UNIT: "kWh_eleq/l", + VALUE: 8.908073954, + ENERGY_CARRIER_UNIT: "l", + }, + "Gasoline": { + UNIT: "kWh_eleq/l", + VALUE: 8.735753974, + ENERGY_CARRIER_UNIT: "l", + }, + "LPG": { + UNIT: "kWh_eleq/l", + VALUE: 6.472821609, + ENERGY_CARRIER_UNIT: "l", + }, + "Ethane": { + UNIT: "kWh_eleq/l", + VALUE: 5.149767951, + ENERGY_CARRIER_UNIT: "l", + }, "H2": { UNIT: "kWh_eleq/kgH2", VALUE: 33.47281985, ENERGY_CARRIER_UNIT: "kgH2", }, # https://epact.energy.gov/fuel-conversion-factors - "Electricity": {UNIT: "kWh_eleq/kWh_el", VALUE: 1, ENERGY_CARRIER_UNIT: "kWh_el",}, - "Biodiesel": {UNIT: "kWh_eleq/l", VALUE: 0.06290669, ENERGY_CARRIER_UNIT: "l",}, - "Ethanol": {UNIT: "kWh_eleq/l", VALUE: 0.04242544, ENERGY_CARRIER_UNIT: "l",}, + "Electricity": { + UNIT: "kWh_eleq/kWh_el", + VALUE: 1, + ENERGY_CARRIER_UNIT: "kWh_el", + }, + "Biodiesel": { + UNIT: "kWh_eleq/l", + VALUE: 0.06290669, + ENERGY_CARRIER_UNIT: "l", + }, + "Ethanol": { + UNIT: "kWh_eleq/l", + VALUE: 0.04242544, + ENERGY_CARRIER_UNIT: "l", + }, "Natural_gas": { UNIT: "kWh_eleq/m3", VALUE: 0.00933273, diff --git a/src/multi_vector_simulator/utils/data_parser.py b/src/multi_vector_simulator/utils/data_parser.py index 96ecb9fab..09399fd9e 100644 --- a/src/multi_vector_simulator/utils/data_parser.py +++ b/src/multi_vector_simulator/utils/data_parser.py @@ -153,7 +153,12 @@ EPA_PARAM_KEYS = { PROJECT_DATA: [PROJECT_ID, PROJECT_NAME, SCENARIO_ID, SCENARIO_NAME], SIMULATION_SETTINGS: [START_DATE, EVALUATED_PERIOD, TIMESTEP, OUTPUT_LP_FILE], - KPI: [KPI_SCALARS_DICT, KPI_UNCOUPLED_DICT, KPI_COST_MATRIX, KPI_SCALAR_MATRIX,], + KPI: [ + KPI_SCALARS_DICT, + KPI_UNCOUPLED_DICT, + KPI_COST_MATRIX, + KPI_SCALAR_MATRIX, + ], "raw_results": ["index", "columns", "data"], "simulation_results": ["logs"], } @@ -304,7 +309,7 @@ def 
convert_epa_params_to_mvs(epa_dict): - `ENERGY_PRODUCTION`: - Default value for `EMISSION_FACTOR` added - `DISPATCHABILITY` is always `False`, as no dispatchable fuel assets possible right now. Must be tackeld by EPA. - """ + """ epa_dict = deepcopy(epa_dict) dict_values = {} @@ -413,9 +418,9 @@ def convert_epa_params_to_mvs(epa_dict): subasset[SOC_INITIAL] = {VALUE: None, UNIT: TYPE_NONE} # move the optimize cap property from STORAGE_CAPACITY to the asset level if OPTIMIZE_CAP in subasset: - dict_asset[asset_label][ - OPTIMIZE_CAP - ] = subasset.pop(OPTIMIZE_CAP) + dict_asset[asset_label][OPTIMIZE_CAP] = ( + subasset.pop(OPTIMIZE_CAP) + ) # move the unit outside the timeseries dict if TIMESERIES in dict_asset[asset_label]: @@ -500,7 +505,9 @@ def convert_epa_params_to_mvs(epa_dict): dict_asset[asset_label][DSM] = False # Dispatchability of energy consumption assets always False dict_asset[asset_label].update( - {DISPATCHABILITY: {UNIT: TYPE_BOOL, VALUE: False},} + { + DISPATCHABILITY: {UNIT: TYPE_BOOL, VALUE: False}, + } ) if asset_group == ENERGY_PRODUCTION or ENERGY_PROVIDERS: @@ -544,9 +551,9 @@ def convert_epa_params_to_mvs(epa_dict): dict_values[CONSTRAINTS] = {} for missing_constraint in missing_params[CONSTRAINTS]: - dict_values[CONSTRAINTS][ - missing_constraint - ] = DEFAULT_CONSTRAINT_VALUES[missing_constraint] + dict_values[CONSTRAINTS][missing_constraint] = ( + DEFAULT_CONSTRAINT_VALUES[missing_constraint] + ) missing_params.pop(CONSTRAINTS) diff --git a/tests/test_A0_initialization.py b/tests/test_A0_initialization.py index ec92b07c5..6aa454da8 100644 --- a/tests/test_A0_initialization.py +++ b/tests/test_A0_initialization.py @@ -114,7 +114,7 @@ def test_if_csv_opt_and_csv_elements_folder_not_in_input_folder_raise_filenotfou def test_if_csv_opt_path_input_file_set_to_path_input_folder_mvs_csv_config_dot_json( self, m_args, tmpdir ): - """Check that the path_input_file is /mvs_csv_config.json """ + """Check that the path_input_file is /mvs_csv_config.json""" os.mkdir(self.fake_input_path) os.mkdir(os.path.join(self.fake_input_path, CSV_ELEMENTS)) user_inputs = A0.process_user_arguments() diff --git a/tests/test_A1_csv_to_json.py b/tests/test_A1_csv_to_json.py index 512731c6a..395a83391 100644 --- a/tests/test_A1_csv_to_json.py +++ b/tests/test_A1_csv_to_json.py @@ -59,7 +59,10 @@ CSV_EXAMPLE = {"col1": {"param1": "val11", "param2": {VALUE: 21, UNIT: "factor"}}} CSV_TIMESERIES = { - "param1": {VALUE: {FILENAME: "test_time_series.csv", HEADER: "power"}, UNIT: "kW",} + "param1": { + VALUE: {FILENAME: "test_time_series.csv", HEADER: "power"}, + UNIT: "kW", + } } CSV_LIST = { diff --git a/tests/test_B0_data_input_json.py b/tests/test_B0_data_input_json.py index 5dee7768c..9a9a3b3a4 100644 --- a/tests/test_B0_data_input_json.py +++ b/tests/test_B0_data_input_json.py @@ -107,9 +107,26 @@ def test_load_json_removes_json_file_from_inputs_folder(self, m_args): JSON_CSV_PATH, path_output_folder=self.test_out_path, move_copy=True ) - assert os.path.exists(os.path.join(CSV_PATH, CSV_ELEMENTS, CSV_FNAME,)) is False + assert ( + os.path.exists( + os.path.join( + CSV_PATH, + CSV_ELEMENTS, + CSV_FNAME, + ) + ) + is False + ) - assert os.path.exists(os.path.join(CSV_PATH, CSV_FNAME,)) is False + assert ( + os.path.exists( + os.path.join( + CSV_PATH, + CSV_FNAME, + ) + ) + is False + ) @mock.patch( "argparse.ArgumentParser.parse_args", @@ -155,9 +172,15 @@ def setup(self): self.n_days = 4 self.start_date = pd.to_datetime("2018-01-01 00:00:00") self.end_date = self.start_date + 
pd.DateOffset(days=self.n_days - 1) - self.ti = pd.date_range(start=self.start_date, end=self.end_date, freq="1D",) + self.ti = pd.date_range( + start=self.start_date, + end=self.end_date, + freq="1D", + ) self.ti_long = pd.date_range( - start=self.start_date, end=self.end_date, freq="1H", + start=self.start_date, + end=self.end_date, + freq="1H", ) self.test_dict_series = { "series": { diff --git a/tests/test_C0_data_processing.py b/tests/test_C0_data_processing.py index b19e29cc9..2da717c19 100644 --- a/tests/test_C0_data_processing.py +++ b/tests/test_C0_data_processing.py @@ -633,7 +633,7 @@ def test_process_maximum_cap_constraint_maximumCap_is_0(): def test_process_maximum_cap_constraint_maximumCap_is_int_smaller_than_installed_cap(): - """"The asset has a maximumCap < installedCap which is invalid and being ignored.""" + """ "The asset has a maximumCap < installedCap which is invalid and being ignored.""" maxCap = 10 dict_values = { group: { @@ -672,7 +672,8 @@ def test_process_maximum_cap_constraint_group_is_ENERGY_PRODUCTION_fuel_source() def test_process_maximum_cap_constraint_group_is_ENERGY_PRODUCTION_non_dispatchable_asset(): # ToDo: change assertion errors """The asset belongs to the energy production group, and is a non-dispatchable asset. - As the maximumCap is used to define the maximum capacity of an asset, but used in oemof-solph to limit a flow, the value has to be translated.""" + As the maximumCap is used to define the maximum capacity of an asset, but used in oemof-solph to limit a flow, the value has to be translated. + """ timeseries_peak = 0.8 group = ENERGY_PRODUCTION maxCap = 100 @@ -703,7 +704,13 @@ def test_process_maximum_cap_constraint_subasset(): """For storages, the subassets have to be processes. This tests the procedure examplary.""" dict_values = { group: { - asset: {subasset: {LABEL: asset, UNIT: unit, MAXIMUM_CAP: {VALUE: None},}} + asset: { + subasset: { + LABEL: asset, + UNIT: unit, + MAXIMUM_CAP: {VALUE: None}, + } + } } } @@ -767,10 +774,16 @@ def test_process_normalized_installed_cap(): def test_add_a_transformer_for_each_peak_demand_pricing_period_1_period(): dict_test_trafo = deepcopy(dict_test) dict_availability_timeseries = C0.define_availability_of_peak_demand_pricing_assets( - dict_test_trafo, 1, 12, + dict_test_trafo, + 1, + 12, ) - list_of_dso_energyConversion_assets = C0.add_a_transformer_for_each_peak_demand_pricing_period( - dict_test_trafo, dict_test[ENERGY_PROVIDERS][DSO], dict_availability_timeseries, + list_of_dso_energyConversion_assets = ( + C0.add_a_transformer_for_each_peak_demand_pricing_period( + dict_test_trafo, + dict_test[ENERGY_PROVIDERS][DSO], + dict_availability_timeseries, + ) ) assert ( len(list_of_dso_energyConversion_assets) == 1 @@ -794,10 +807,16 @@ def test_add_a_transformer_for_each_peak_demand_pricing_period_1_period(): def test_add_a_transformer_for_each_peak_demand_pricing_period_2_periods(): dict_test_trafo = deepcopy(dict_test) dict_availability_timeseries = C0.define_availability_of_peak_demand_pricing_assets( - dict_test_trafo, 2, 6, + dict_test_trafo, + 2, + 6, ) - list_of_dso_energyConversion_assets = C0.add_a_transformer_for_each_peak_demand_pricing_period( - dict_test_trafo, dict_test[ENERGY_PROVIDERS][DSO], dict_availability_timeseries, + list_of_dso_energyConversion_assets = ( + C0.add_a_transformer_for_each_peak_demand_pricing_period( + dict_test_trafo, + dict_test[ENERGY_PROVIDERS][DSO], + dict_availability_timeseries, + ) ) assert ( len(list_of_dso_energyConversion_assets) == 2 @@ -1263,7 +1282,10 
@@ def test_load_timeseries_from_csv_file_over_TIMESERIES(): LABEL: "Electricity demand", OEMOF_ASSET_TYPE: OEMOF_SINK, UNIT: "kW", - TIMESERIES: {VALUE: [4, 5, 6], DATA_TYPE_JSON_KEY: TYPE_SERIES,}, + TIMESERIES: { + VALUE: [4, 5, 6], + DATA_TYPE_JSON_KEY: TYPE_SERIES, + }, } C0.receive_timeseries_from_csv(settings_dict, dict_asset, input_type="input") assert (dict_asset[TIMESERIES].values == np.array([1, 2, 3])).all() diff --git a/tests/test_C1_verification.py b/tests/test_C1_verification.py index 316e19bbf..4c31847ea 100644 --- a/tests/test_C1_verification.py +++ b/tests/test_C1_verification.py @@ -494,7 +494,7 @@ def test_check_time_series_values_between_0_and_1_False_smaller_0(): @pytest.fixture() def get_dict_vals(): - """ Reads input json file.""" + """Reads input json file.""" with open(os.path.join(JSON_PATH)) as json_file: dict_values = json.load(json_file) # todo welches file??? return dict_values diff --git a/tests/test_D1_model_components.py b/tests/test_D1_model_components.py index 27dca638b..a40fa21a9 100644 --- a/tests/test_D1_model_components.py +++ b/tests/test_D1_model_components.py @@ -49,10 +49,11 @@ TEST_REPO_PATH, TEST_INPUT_DIRECTORY, "inputs_for_D1", JSON_FNAME ) + # fixtures that help creating variables and data needed for the tests @pytest.fixture() def get_json(): - """ Reads input json file. """ + """Reads input json file.""" with open(D1_JSON) as json_file: dict_values = json.load(json_file) yield dict_values @@ -60,7 +61,7 @@ def get_json(): @pytest.fixture() def get_model(): - """ Creates solph.EnergySystem model. """ + """Creates solph.EnergySystem model.""" time_index = pd.date_range( start=pd.to_datetime("2018-01-01 00:00:00"), end=pd.to_datetime("2018-12-31 23:00:00"), @@ -71,7 +72,7 @@ def get_model(): @pytest.fixture() def get_busses(): - """ Creates busses (solph.Bus) dictionary. """ + """Creates busses (solph.Bus) dictionary.""" yield { "Fuel bus": solph.Bus(label="Fuel bus"), "Electricity bus": D1.CustomBus( @@ -90,7 +91,7 @@ def get_busses(): class TestTransformerComponent: @pytest.fixture(autouse=True) def setup_class(self, get_json, get_model, get_busses): - """ Sets up class attributes for the tests. """ + """Sets up class attributes for the tests.""" self.dict_values = get_json self.model = get_model self.transformers = {} @@ -302,7 +303,6 @@ def test_transformer_optimize_cap_multiple_output_busses_multiple_single_efficie bus=self.busses, ) - def test_transformer_fix_cap_single_busses(self): dict_asset = self.dict_values[ENERGY_CONVERSION][ "transformer_fix_single_busses" @@ -315,7 +315,6 @@ def test_transformer_fix_cap_single_busses(self): bus=self.busses, ) - # # only one output and one input bus # assert ( # len([str(i) for i in self.model.entities[-1].outputs]) == 1 @@ -394,7 +393,9 @@ def test_transformer_fix_cap_single_busses(self): optimize=False, dict_asset=dict_asset ) - def test_transformer_fix_cap_multiple_input_busses(self,): + def test_transformer_fix_cap_multiple_input_busses( + self, + ): dict_asset = self.dict_values[ENERGY_CONVERSION][ "transformer_fix_multiple_input_busses" ] @@ -578,7 +579,7 @@ def test_chp_wrong_outflow_bus_energy_vector(self): class TestSinkComponent: @pytest.fixture(autouse=True) def setup_class(self, get_json, get_model, get_busses): - """ Sets up class attributes for the tests. 
""" + """Sets up class attributes for the tests.""" self.dict_values = get_json self.model = get_model self.busses = get_busses @@ -639,7 +640,10 @@ def test_sink_non_dispatchable_single_input_bus(self): dict_asset[TIMESERIES] = self.time_series D1.sink_non_dispatchable( - model=self.model, dict_asset=dict_asset, sink=self.sinks, bus=self.busses, + model=self.model, + dict_asset=dict_asset, + sink=self.sinks, + bus=self.busses, ) self.helper_test_sink_in_model_and_dict( @@ -651,7 +655,10 @@ def test_sink_non_dispatchable_multiple_input_busses(self): dict_asset[TIMESERIES] = self.time_series D1.sink_non_dispatchable( - model=self.model, dict_asset=dict_asset, sink=self.sinks, bus=self.busses, + model=self.model, + dict_asset=dict_asset, + sink=self.sinks, + bus=self.busses, ) self.helper_test_sink_in_model_and_dict( @@ -662,7 +669,10 @@ def test_sink_dispatchable_single_input_bus(self): dict_asset = self.dict_values[ENERGY_CONSUMPTION]["dispatchable_single"] D1.sink_dispatchable_optimize( - model=self.model, dict_asset=dict_asset, sink=self.sinks, bus=self.busses, + model=self.model, + dict_asset=dict_asset, + sink=self.sinks, + bus=self.busses, ) self.helper_test_sink_in_model_and_dict( @@ -673,7 +683,10 @@ def test_sink_dispatchable_multiple_input_busses(self): dict_asset = self.dict_values[ENERGY_CONSUMPTION]["dispatchable_multiple"] D1.sink_dispatchable_optimize( - model=self.model, dict_asset=dict_asset, sink=self.sinks, bus=self.busses, + model=self.model, + dict_asset=dict_asset, + sink=self.sinks, + bus=self.busses, ) self.helper_test_sink_in_model_and_dict( @@ -684,7 +697,7 @@ def test_sink_dispatchable_multiple_input_busses(self): class TestSourceComponent: @pytest.fixture(autouse=True) def setup_class(self, get_json, get_model, get_busses): - """ Sets up class attributes for the tests. """ + """Sets up class attributes for the tests.""" self.dict_values = get_json self.model = get_model self.busses = get_busses @@ -828,7 +841,9 @@ def test_source_dispatchable_optimize_normalized_timeseries(self): timeseries="normalized", ) - def test_source_dispatchable_optimize_timeseries_not_normalized_timeseries(self,): + def test_source_dispatchable_optimize_timeseries_not_normalized_timeseries( + self, + ): dict_asset = self.dict_values[ENERGY_PRODUCTION]["dispatchable_source_optimize"] dict_asset[TIMESERIES] = self.time_series dict_asset[TIMESERIES_PEAK] = {"unit": "kWp/H", "value": self.time_series.max()} @@ -889,7 +904,7 @@ def test_source_dispatchable_fix_timeseries_not_normalized_timeseries(self): class TestStorageComponent: @pytest.fixture(autouse=True) def setup_class(self, get_json, get_model, get_busses): - """ Sets up class attributes for the tests. 
""" + """Sets up class attributes for the tests.""" self.dict_values = get_json self.model = get_model self.busses = get_busses diff --git a/tests/test_D2_model_constraints.py b/tests/test_D2_model_constraints.py index fb1b23a31..630531b2c 100644 --- a/tests/test_D2_model_constraints.py +++ b/tests/test_D2_model_constraints.py @@ -111,7 +111,8 @@ def test_prepare_constraint_minimal_renewable_share(): renewable_assets, non_renewable_assets, ) = D2.prepare_constraint_minimal_renewable_share( - dict_values=dict_values, dict_model=dict_model, + dict_values=dict_values, + dict_model=dict_model, ) assert ( @@ -184,7 +185,10 @@ def test_prepare_demand_assets(): OEMOF_BUSSES: {electricity: electricity}, } - demands = D2.prepare_demand_assets(dict_values, dict_model,) + demands = D2.prepare_demand_assets( + dict_values, + dict_model, + ) assert ( demand_profiles in demands @@ -214,7 +218,9 @@ def test_prepare_energy_provider_consumption_sources(): electricity = "Electricity" dso = "DSO" dict_values = { - ENERGY_PROVIDERS: {dso: {LABEL: dso},}, + ENERGY_PROVIDERS: { + dso: {LABEL: dso}, + }, ENERGY_PRODUCTION: { dso + DSO_CONSUMPTION: { @@ -225,12 +231,17 @@ def test_prepare_energy_provider_consumption_sources(): }, } dict_model = { - OEMOF_SOURCE: {dso + DSO_CONSUMPTION: dso + DSO_CONSUMPTION,}, + OEMOF_SOURCE: { + dso + DSO_CONSUMPTION: dso + DSO_CONSUMPTION, + }, OEMOF_BUSSES: {electricity: electricity}, } - energy_provider_consumption_sources = D2.prepare_energy_provider_consumption_sources( - dict_values, dict_model, + energy_provider_consumption_sources = ( + D2.prepare_energy_provider_consumption_sources( + dict_values, + dict_model, + ) ) DSO_source_name = dict_values[ENERGY_PROVIDERS][dso][LABEL] + DSO_CONSUMPTION @@ -264,7 +275,9 @@ def test_prepare_energy_provider_feedin_sinks(): electricity = "Electricity" dso = "DSO" dict_values = { - ENERGY_PROVIDERS: {dso: {LABEL: dso},}, + ENERGY_PROVIDERS: { + dso: {LABEL: dso}, + }, ENERGY_CONSUMPTION: { dso + DSO_FEEDIN: { @@ -275,12 +288,15 @@ def test_prepare_energy_provider_feedin_sinks(): }, } dict_model = { - OEMOF_SINK: {dso + DSO_FEEDIN: dso + DSO_FEEDIN,}, + OEMOF_SINK: { + dso + DSO_FEEDIN: dso + DSO_FEEDIN, + }, OEMOF_BUSSES: {electricity: electricity}, } energy_provider_feedin_sinks = D2.prepare_energy_provider_feedin_sinks( - dict_values, dict_model, + dict_values, + dict_model, ) DSO_sink_name = dict_values[ENERGY_PROVIDERS][dso][LABEL] + DSO_FEEDIN @@ -366,7 +382,8 @@ def test_constraint_maximum_emissions(self): """Checks if maximum emissions limit is properly added as a constraint""" # Create a solph model using the input values (especially the constraints setup as class variables above) model = D2.constraint_maximum_emissions( - model=solph.Model(self.model), dict_values=self.dict_values, + model=solph.Model(self.model), + dict_values=self.dict_values, ) assert ( model.integral_limit_emission_factor_constraint.upper.value diff --git a/tests/test_E1_process_results.py b/tests/test_E1_process_results.py index 1a24199a7..7a949b04a 100644 --- a/tests/test_E1_process_results.py +++ b/tests/test_E1_process_results.py @@ -322,7 +322,9 @@ def test_add_info_flows_storage_capacity(): ), f"Parameter {parameter} should have been added to the dict_asset." 
if parameter == FLOW: assert_series_equal( - dict_test[FLOW].astype(np.int64), flow, check_names=False, + dict_test[FLOW].astype(np.int64), + flow, + check_names=False, ) else: assert ( @@ -358,7 +360,9 @@ def test_add_info_flows_1_day(): ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}." assert_series_equal( - dict_test[FLOW].astype(np.int64), flow, check_names=False, + dict_test[FLOW].astype(np.int64), + flow, + check_names=False, ) assert dict_test[TOTAL_FLOW][VALUE] == sum( flow @@ -393,7 +397,9 @@ def test_add_info_flows_365_days(): ), f"Parameter {parameter} should have been added to the dict_asset with an {VALUE}." assert_series_equal( - dict_test[FLOW].astype(np.int64), flow, check_names=False, + dict_test[FLOW].astype(np.int64), + flow, + check_names=False, ) assert dict_test[TOTAL_FLOW][VALUE] == sum( flow diff --git a/tests/test_E2_economics.py b/tests/test_E2_economics.py index 8bcba000d..a00fed0a2 100644 --- a/tests/test_E2_economics.py +++ b/tests/test_E2_economics.py @@ -163,10 +163,12 @@ def test_calculate_operation_and_management_expenditures(): installed_capacity = 10 optimized_add_capacity = 10 specific_om_cost = 5 - operation_and_management_expenditures = E2.calculate_operation_and_management_expenditures( - specific_om_cost=specific_om_cost, - installed_capacity=installed_capacity, - optimized_add_capacity=optimized_add_capacity, + operation_and_management_expenditures = ( + E2.calculate_operation_and_management_expenditures( + specific_om_cost=specific_om_cost, + installed_capacity=installed_capacity, + optimized_add_capacity=optimized_add_capacity, + ) ) assert operation_and_management_expenditures == specific_om_cost * ( installed_capacity + optimized_add_capacity diff --git a/tests/test_E3_indicator_calculation.py b/tests/test_E3_indicator_calculation.py index e893e2de7..793f05122 100644 --- a/tests/test_E3_indicator_calculation.py +++ b/tests/test_E3_indicator_calculation.py @@ -96,7 +96,10 @@ def test_totalling_scalars_values(): dict_renewable_energy_use = { ENERGY_PRODUCTION: { dso - + DSO_CONSUMPTION: {ENERGY_VECTOR: electricity, TOTAL_FLOW: {VALUE: flow_dso},}, + + DSO_CONSUMPTION: { + ENERGY_VECTOR: electricity, + TOTAL_FLOW: {VALUE: flow_dso}, + }, pv_plant: { ENERGY_VECTOR: electricity, TOTAL_FLOW: {VALUE: flow_pv_local}, @@ -730,7 +733,7 @@ def test_add_degree_of_net_zero_energy(): def test_equation_degree_of_net_zero_energy(): - """ Degree of NZE between 0 and 1.""" + """Degree of NZE between 0 and 1.""" total_feedin = 60 total_grid_consumption = 80 total_demand = 100 diff --git a/tests/test_E4_verification.py b/tests/test_E4_verification.py index a2352289e..13bdda20f 100644 --- a/tests/test_E4_verification.py +++ b/tests/test_E4_verification.py @@ -144,7 +144,7 @@ def test_net_zero_energy_constraint_test_fails(): def test_detect_excessive_excess_generation_in_bus_warning_is_logged(caplog): - """A logging.warning is printed due to excessive excess generation. """ + """A logging.warning is printed due to excessive excess generation.""" bus_label = "Test_bus" dict_values = { "optimizedFlows": { @@ -162,7 +162,7 @@ def test_detect_excessive_excess_generation_in_bus_warning_is_logged(caplog): def test_detect_excessive_excess_generation_in_bus_no_excess(caplog): - """No excessive excess generation takes place. 
""" + """No excessive excess generation takes place.""" bus_label = "Test_bus" dict_values = { "optimizedFlows": { @@ -183,7 +183,7 @@ def test_detect_excessive_excess_generation_in_bus_no_excess(caplog): def test_detect_excessive_excess_generation_in_bus_several_busses_two_warnings(caplog): - """Excessive excess generation takes place in two busses. """ + """Excessive excess generation takes place in two busses.""" excessive_excess_bus_1, excessive_excess_bus_2 = ( "Bus_excessive_excess_1", "Bus_excessive_excess_2", @@ -198,10 +198,18 @@ def test_detect_excessive_excess_generation_in_bus_several_busses_two_warnings(c } ), excessive_excess_bus_1: pd.DataFrame( - {"inflow": [1, 2, 3], "outflow": [-1, -1, -2], "excess": [0, 1, 1],} + { + "inflow": [1, 2, 3], + "outflow": [-1, -1, -2], + "excess": [0, 1, 1], + } ), excessive_excess_bus_2: pd.DataFrame( - {"inflow": [1, 2, 3], "outflow": [-1, -1, -1], "excess": [0, 1, 2],} + { + "inflow": [1, 2, 3], + "outflow": [-1, -1, -1], + "excess": [0, 1, 2], + } ), } } @@ -218,7 +226,8 @@ def test_detect_excessive_excess_generation_in_bus_several_busses_two_warnings(c def test_verify_state_of_charge_feasible(caplog): """Two cases are tested here Case 1 is no storage components in the energy system, hence no verification carried out - Case 2 is that all the SoC values are physically feasible, so no WARNING log messages""" + Case 2 is that all the SoC values are physically feasible, so no WARNING log messages + """ # Test case: No storage components present in the system, so function is exited before any verification # Make an empty energyStorage dict signifying that there are no storage components in the energy system @@ -243,7 +252,11 @@ def test_verify_state_of_charge_feasible(caplog): storage = "storage_01" # Add the SoC time series to the result JSON nested-dict - dict_values = {ENERGY_STORAGE: {storage: {TIMESERIES_SOC: soc_series},}} + dict_values = { + ENERGY_STORAGE: { + storage: {TIMESERIES_SOC: soc_series}, + } + } # Test for the function's behavior with the current case with caplog.at_level(logging.WARNING): E4.verify_state_of_charge(dict_values=dict_values) diff --git a/tests/test_F0_output.py b/tests/test_F0_output.py index 21e35b7a1..87b0196c7 100644 --- a/tests/test_F0_output.py +++ b/tests/test_F0_output.py @@ -136,7 +136,10 @@ def test_store_scalars_to_excel_two_tabs_no_dict(self): def test_store_each_bus_timeseries_to_excel_and_png_one_bus(self): """ """ dict_timeseries_test_one_bus = { - PROJECT_DATA: {PROJECT_NAME: "a_project", SCENARIO_NAME: "a_scenario",}, + PROJECT_DATA: { + PROJECT_NAME: "a_project", + SCENARIO_NAME: "a_scenario", + }, SIMULATION_SETTINGS: {PATH_OUTPUT_FOLDER: OUTPUT_PATH}, OPTIMIZED_FLOWS: {"a_bus": BUS}, } @@ -154,7 +157,10 @@ def test_store_each_bus_timeseries_to_excel_and_png_one_bus(self): def test_store_each_bus_timeseries_to_excel_and_png_two_busses(self): """ """ dict_timeseries_test_two_busses = { - PROJECT_DATA: {PROJECT_NAME: "a_project", SCENARIO_NAME: "a_scenario",}, + PROJECT_DATA: { + PROJECT_NAME: "a_project", + SCENARIO_NAME: "a_scenario", + }, SIMULATION_SETTINGS: {PATH_OUTPUT_FOLDER: OUTPUT_PATH}, OPTIMIZED_FLOWS: {"a_bus": BUS, "b_bus": BUS}, } @@ -204,7 +210,7 @@ def setup_method(self): ), ) def test_generate_pdf_report(self, m_args): - """Run the simulation with -pdf option to make sure the pdf file is generated """ + """Run the simulation with -pdf option to make sure the pdf file is generated""" main() assert os.path.exists(os.path.join(OUTPUT_PATH, PDF_REPORT)) is True diff --git 
a/tests/test_benchmark_KPI.py b/tests/test_benchmark_KPI.py index 252cf7271..11e85b61e 100644 --- a/tests/test_benchmark_KPI.py +++ b/tests/test_benchmark_KPI.py @@ -4,6 +4,7 @@ What should differ between the different functions is the input file """ + import argparse import os import shutil @@ -288,16 +289,19 @@ def test_benchmark_Economic_KPI_C2_E2(self, margs): assert ( key in asset_data ), f"{key} is not in the asset data of {asset_group}, {asset}. It includes: {asset_data.keys()}." - if not pd.isna(expected_values.loc[asset, key]) and not pd.isna(asset_data[key][VALUE]): + if not pd.isna(expected_values.loc[asset, key]) and not pd.isna( + asset_data[key][VALUE] + ): assert float(expected_values.loc[asset, key]) == pytest.approx( asset_data[key][VALUE], rel=1e-3 ), f"Parameter {key} of asset {asset} is not of expected value, expected {expected_values.loc[asset, key]}, got {asset_data[key][VALUE]}." - + # Now we established that the externally calculated values are equal to the internally calculated values. # Therefore, we can now use the cost data from the assets to validate the cost data for the whole energy system. demand = pd.read_csv( - os.path.join(TEST_INPUT_PATH, USE_CASE, TIME_SERIES, "demand.csv"), sep=",", + os.path.join(TEST_INPUT_PATH, USE_CASE, TIME_SERIES, "demand.csv"), + sep=",", ) aggregated_demand = demand.sum()[0] diff --git a/tests/test_benchmark_constraints.py b/tests/test_benchmark_constraints.py index c269e9643..a5b851b2f 100644 --- a/tests/test_benchmark_constraints.py +++ b/tests/test_benchmark_constraints.py @@ -4,6 +4,7 @@ What should differ between the different functions is the input file """ + import argparse import os import shutil diff --git a/tests/test_benchmark_scenarios.py b/tests/test_benchmark_scenarios.py index f0bdfa237..c9d502e9f 100644 --- a/tests/test_benchmark_scenarios.py +++ b/tests/test_benchmark_scenarios.py @@ -4,6 +4,7 @@ What should differ between the different functions is the input file """ + import argparse import os import shutil diff --git a/tests/test_utils.py b/tests/test_utils.py index 1b44ee8fd..d6404c08a 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -130,7 +130,7 @@ def test_set_nested_value_with_unexisting_key_at_end_of_path(self): set_nested_value(dct, 400, ("b", "b1", "b12", "b122")) def test_set_nested_value_with_unexisting_key_in_middle_of_path(self): - """because the path diverges """ + """because the path diverges""" dct = dict(a=dict(a1=1, a2=2), b=dict(b1=dict(b11=11, b12=dict(b121=121)))) with self.assertRaises(KeyError): set_nested_value(dct, 400, ("b", "d1", "b12", "b121")) From 2d813225a54557e1d4a4abb673aa65236e839e40 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Sun, 28 Apr 2024 23:20:10 +0200 Subject: [PATCH 10/18] Fix test within github actions --- tests/test_B0_data_input_json.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_B0_data_input_json.py b/tests/test_B0_data_input_json.py index 9a9a3b3a4..8615f0961 100644 --- a/tests/test_B0_data_input_json.py +++ b/tests/test_B0_data_input_json.py @@ -168,7 +168,7 @@ def teardown_method(self): class TestConversionJsonToPythonTypes: - def setup(self): + def setup_method(self): self.n_days = 4 self.start_date = pd.to_datetime("2018-01-01 00:00:00") self.end_date = self.start_date + pd.DateOffset(days=self.n_days - 1) From b3b5eb38834948bba3a5154cb997283efd37776e Mon Sep 17 00:00:00 2001 From: smartie2076 <44204527+smartie2076@users.noreply.github.com> Date: Tue, 2 Nov 2021 10:41:55 +0100 Subject: [PATCH 
11/18] Add reference AHK Chile --- docs/references/publications.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 2ec8811c0..8180ecb57 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -15,6 +15,11 @@ Articles * :(Hoffmann, 2020b): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference paper, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online. 2020. DOI: `10.5281/zenodo.4449918 `__ +Reports +^^^^^^^ + +* :(AHK Chile, 2021): Christoph Meyer, Mar Ortiz, Annika Schüttler. AHK Chile, August 2021: German: Einsatz von grünem Wasserstoff zur netzfernen Stromversorgung in Insel- und kleineren Stromnetzen in Chile. Spanish: Uso de hidrógeno verde para el suministro de energía fuera de la red en microrredes y redes pequeñas de electricidad en Chile. Available on: `__ + Posters ^^^^^^^ @@ -52,6 +57,11 @@ H2020 research project open_Plan The `H2020 research project open_plan `__ aims to build an open source tool to plan the design of a single energy cell. It will extend on the existing features of MVS to fullfil the requirements of its pilot projects. The project open_plan is funded until December 2022, the development of the graphical user interface will take place on the `github repository of open_plan `__. +Consulting project with AHK Chile +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The MVS was applied to three case study locations in Chile to determine the local potential to use hydrogen for storing renewable generation. The locations included a grid on an island (Melinka), a region (Aysén) and a industrial site (Multiexport). More information can be found on the `RLI website `__. A report in German and Spanish is available. + .. _reference_bibliography: Bibliography From aac8cf95fa8b119a68db1550d56771d5b325a762 Mon Sep 17 00:00:00 2001 From: smartie2076 <44204527+smartie2076@users.noreply.github.com> Date: Tue, 2 Nov 2021 10:49:13 +0100 Subject: [PATCH 12/18] Add product sheet "Multi-vector simulator" --- docs/references/publications.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 8180ecb57..7097f471c 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -23,6 +23,8 @@ Reports Posters ^^^^^^^ +* :(E-LAND, 2021): E-LAND, 2021: Multi-vector simulator. Planning the energy supply system of the future. Product sheet. Available: `__ + * :(Hoffmann, 2020a): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference poster, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online: DOI: `10.5281/zenodo.4449969 `__ Presentations From 5fc66263795674386bdd830188d24e89171cf98f Mon Sep 17 00:00:00 2001 From: smartie2076 <44204527+smartie2076@users.noreply.github.com> Date: Tue, 2 Nov 2021 10:51:31 +0100 Subject: [PATCH 13/18] Add D4.4 No link to report, as not available online! 
--- docs/references/publications.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 7097f471c..019cdd708 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -20,6 +20,8 @@ Reports * :(AHK Chile, 2021): Christoph Meyer, Mar Ortiz, Annika Schüttler. AHK Chile, August 2021: German: Einsatz von grünem Wasserstoff zur netzfernen Stromversorgung in Insel- und kleineren Stromnetzen in Chile. Spanish: Uso de hidrógeno verde para el suministro de energía fuera de la red en microrredes y redes pequeñas de electricidad en Chile. Available on: `__ +* :(E-LAND, 2021): Martha Hoffmann, Ciara Dunks, Sabine Haas. May 2021: Innovative Multi-Vector Simulator. Deliverable 4.4 + Posters ^^^^^^^ From 8ba2153970f01a81fd40646c08d1bdb9f1e4ff2f Mon Sep 17 00:00:00 2001 From: smartie2076 <44204527+smartie2076@users.noreply.github.com> Date: Tue, 2 Nov 2021 11:15:05 +0100 Subject: [PATCH 14/18] Update product sheet --- docs/references/publications.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 019cdd708..91030f98d 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -20,12 +20,12 @@ Reports * :(AHK Chile, 2021): Christoph Meyer, Mar Ortiz, Annika Schüttler. AHK Chile, August 2021: German: Einsatz von grünem Wasserstoff zur netzfernen Stromversorgung in Insel- und kleineren Stromnetzen in Chile. Spanish: Uso de hidrógeno verde para el suministro de energía fuera de la red en microrredes y redes pequeñas de electricidad en Chile. Available on: `__ -* :(E-LAND, 2021): Martha Hoffmann, Ciara Dunks, Sabine Haas. May 2021: Innovative Multi-Vector Simulator. Deliverable 4.4 +* :(E-LAND, 2021b): Martha Hoffmann, Ciara Dunks, Sabine Haas. May 2021: Innovative Multi-Vector Simulator. Deliverable 4.4 Posters ^^^^^^^ -* :(E-LAND, 2021): E-LAND, 2021: Multi-vector simulator. Planning the energy supply system of the future. Product sheet. Available: `__ +* :(E-LAND, 2021a): E-LAND, 2021: Multi-Vector Simulator. Planning the energy supply system of the future. Product sheet. Available: `__ * :(Hoffmann, 2020a): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference poster, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online: DOI: `10.5281/zenodo.4449969 `__ From 7eadd390da54176f7980e78721fe5bfcafa54d8e Mon Sep 17 00:00:00 2001 From: smartie2076 <44204527+smartie2076@users.noreply.github.com> Date: Mon, 29 Apr 2024 10:29:30 +0200 Subject: [PATCH 15/18] Update publications.rst --- docs/references/publications.rst | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 91030f98d..7854cce24 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -13,26 +13,34 @@ The MVS is currently under development in the H2020 research project `E-LAND`. S Articles ^^^^^^^^ -* :(Hoffmann, 2020b): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference paper, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online. 
2020. DOI: `10.5281/zenodo.4449918 `__ +* :(Farrukh, 2022): Farhan Farrukh, Ciara Dunks, Martha Marie Hoffmann, Per Olav Dypvik: *Assessment of the potential of local solar generation for providing ship shore power in the Norwegian harbour Port of Borg*, 2022 18th International Conference on the European Energy Market (EEM), DOI: 10.1109/EEM54602.2022.9921031, `Link `__ + +* :(Puranik, 2022): Sanket Puranik, Martha M. Hoffmann, Farhan Farrukh, Sunil Sharma: *Optimal investments into rooftop solar and batteries for a distribution grid company and prosumers: A case study in India.*, Conference Paper, 2022 IEEE 7th International Energy Conference (ENERGYCON). DOI: 10.1109/ENERGYCON53164.2022.9830341. `Link `__ + +* :(Hoffmann, 2020b): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference paper, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online. 2020. DOI: `Link <10.5281/zenodo.4449918 `__ Reports ^^^^^^^ -* :(AHK Chile, 2021): Christoph Meyer, Mar Ortiz, Annika Schüttler. AHK Chile, August 2021: German: Einsatz von grünem Wasserstoff zur netzfernen Stromversorgung in Insel- und kleineren Stromnetzen in Chile. Spanish: Uso de hidrógeno verde para el suministro de energía fuera de la red en microrredes y redes pequeñas de electricidad en Chile. Available on: `__ +* :(AHK Chile, 2021): Christoph Meyer, Mar Ortiz, Annika Schüttler. AHK Chile, August 2021: German: Einsatz von grünem Wasserstoff zur netzfernen Stromversorgung in Insel- und kleineren Stromnetzen in Chile. Spanish: Uso de hidrógeno verde para el suministro de energía fuera de la red en microrredes y redes pequeñas de electricidad en Chile. Available on: `Link `__ * :(E-LAND, 2021b): Martha Hoffmann, Ciara Dunks, Sabine Haas. May 2021: Innovative Multi-Vector Simulator. Deliverable 4.4 Posters ^^^^^^^ -* :(E-LAND, 2021a): E-LAND, 2021: Multi-Vector Simulator. Planning the energy supply system of the future. Product sheet. Available: `__ +* :(E-LAND, 2021a): E-LAND, 2021: Multi-Vector Simulator. Planning the energy supply system of the future. Product sheet. Available: `Link `__ * :(Hoffmann, 2020a): Martha M. Hoffmann, Sanket Puranik, Marc Juanpera, José M. Martín-Rapún, Heidi Tuiskula, & Philipp Blechinger: *Investment planning in multi-vector energy systems: Definition of key performance indicators*, Conference poster, presented at the CIRED 2020 Berlin Workshop (CIRED 2020), Berlin / online: DOI: `10.5281/zenodo.4449969 `__ Presentations ^^^^^^^^^^^^^ -* :(Hoffmann, 2020c): *Introducing an os simulation tool for sector-coupled energy sys opt: Multi-Vector Simulator (MVS)* Presentation at Energy Modelling Platform for Europe (EMP-E) 2020, 06. – 08. October 2020, online. Link to session: `Youtube `__ +* : (Puranik, 2021): Sanket Puranik, Martha M. Hoffmann, Isidoros Kokos, Sergio Herraiz, Per Gjerløw: *Facilitating Local Multi-Vector Energy Systems with the E-LAND toolbox*, Workshop at "Sustainable Places 2021", `Link `__ + +* :(Herraiz, 2021): Sergio Herraiz, Martha M. Hoffmann: *Facilitating local multi-vector energy systems: Integrated investment and operational planning.* Präsentation in Session 8: Cross-sectoral linkages and integration, Day 2: Linking Sectors and Technologies at the EMP-E 2021 (26-28. Oktober 2021), online. 
`conference schedule < https://emp-e-2021.b2match.io/page-4451>`__ + +* :(Hoffmann, 2020c): *Introducing an open source simulation tool for sector-coupled energy system optimization: Multi-Vector Simulator (MVS)* Presentation at Energy Modelling Platform for Europe (EMP-E) 2020, 06. – 08. October 2020, online. Link to session: `Youtube `__ * :(Hoffmann, 2020d): *Multi-Vector Simulator*, session: *Building on experience: What to take from individual models for the oemof-community*, presentation at oemof developer meeting, 02. - 04. December 2020, online. Link: `conference schedule `__ From 70c12f12da253a3a3d5e8296247d50904b163223 Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Thu, 2 May 2024 14:12:53 +0200 Subject: [PATCH 16/18] Fix link target --- docs/references/publications.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/references/publications.rst b/docs/references/publications.rst index 7854cce24..48685b141 100644 --- a/docs/references/publications.rst +++ b/docs/references/publications.rst @@ -38,7 +38,7 @@ Presentations * : (Puranik, 2021): Sanket Puranik, Martha M. Hoffmann, Isidoros Kokos, Sergio Herraiz, Per Gjerløw: *Facilitating Local Multi-Vector Energy Systems with the E-LAND toolbox*, Workshop at "Sustainable Places 2021", `Link `__ -* :(Herraiz, 2021): Sergio Herraiz, Martha M. Hoffmann: *Facilitating local multi-vector energy systems: Integrated investment and operational planning.* Präsentation in Session 8: Cross-sectoral linkages and integration, Day 2: Linking Sectors and Technologies at the EMP-E 2021 (26-28. Oktober 2021), online. `conference schedule < https://emp-e-2021.b2match.io/page-4451>`__ +* :(Herraiz, 2021): Sergio Herraiz, Martha M. Hoffmann: *Facilitating local multi-vector energy systems: Integrated investment and operational planning.* Präsentation in Session 8: Cross-sectoral linkages and integration, Day 2: Linking Sectors and Technologies at the EMP-E 2021 (26-28. Oktober 2021), online. `conference schedule `__ * :(Hoffmann, 2020c): *Introducing an open source simulation tool for sector-coupled energy system optimization: Multi-Vector Simulator (MVS)* Presentation at Energy Modelling Platform for Europe (EMP-E) 2020, 06. – 08. October 2020, online. Link to session: `Youtube `__ From f90c9dc285005cd08426a16b2bd5b13c8d3cb5fe Mon Sep 17 00:00:00 2001 From: "pierre-francois.duc" Date: Fri, 3 May 2024 10:47:21 +0200 Subject: [PATCH 17/18] Update changelog --- CHANGELOG.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f395941e5..081fe384f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,19 +18,29 @@ Here is a template for new release sections ### Fixed - ``` - ## [Unreleased] ### Added +- +### Changed +- +### Removed +- +### Fixed +- -- Introduce reducable demand. It should be listed within sinks, and provided an efficiency (number between 0 and 1). This efficiency correspond to the percent of the demand which must be provided (critical demand). The oemof-solph sinks which models the non-critical part of the demand has very small variable_costs such that it should not influence the costs calculations but should be fulfilled rather than dumping energy into excess sinks. Developed for the server version. (#969) +## [1.1.1] - 2024-05-03 + +### Added +- Introduce reducable demand. It should be listed within sinks, and provided an efficiency (number between 0 and 1). 
This efficiency corresponds to the percentage of the demand which must be provided (critical demand). The oemof-solph sink which models the non-critical part of the demand has very small variable_costs such that it should not influence the cost calculations but should be fulfilled rather than dumping energy into excess sinks. Developed for the server version. (#969)
+- Possibility to set a minimum and/or maximum load for a transformer and solve a non-convex optimization. The simulation time can be quite long with the cbc solver! Developed for the server version. (#969)

 ### Changed

 - Add costs to excess sinks of busses. If the dictionary containing the information about the bus contains a key "price", its value will be applied to the variable costs of the sink (unit of the price is currency/energy unit, default currency/kWh). Developed for the server version. (#969)

-### Fixed
+- black version was bumped to 24.3.0 (#971)


 ## [1.1.0] - 2024-04-27

From 03bf1fa0a9819aaae275a1cad344d60682a42b2d Mon Sep 17 00:00:00 2001
From: "pierre-francois.duc"
Date: Fri, 3 May 2024 11:43:28 +0200
Subject: [PATCH 18/18] Bump version number

---
 src/multi_vector_simulator/version.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/multi_vector_simulator/version.py b/src/multi_vector_simulator/version.py
index 849ff8155..150ffb164 100644
--- a/src/multi_vector_simulator/version.py
+++ b/src/multi_vector_simulator/version.py
@@ -1,4 +1,4 @@
 # versioning scheme: Major release.Minor release.Patches
-version_num = "1.1.0rc2"
+version_num = "1.1.1"
 # date format iso8601: YYYY-MM-DD
-version_date = "2024-04-27"
+version_date = "2024-05-03"
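
The changelog entries above describe the new minimum/maximum load option only in prose. The following minimal sketch (not part of the patches) illustrates how such a load window is typically expressed on an oemof-solph flow, using the `solph.Flow` keyword style visible in `D1_model_components.py`; the bus label, capacity, cost value and load fractions are illustrative assumptions, and class locations may differ slightly depending on the installed oemof-solph version.

```python
import oemof.solph as solph

# Illustrative sketch only: an output flow that, whenever the unit is on, must
# run between 20 % and 100 % of its nominal capacity. The NonConvex option adds
# the binary status variable that allows switching the unit off entirely, which
# is what makes the optimization problem non-convex (a MILP) and can noticeably
# increase solving time with the cbc solver.
electricity_bus = solph.Bus(label="Electricity bus")

output_flow = solph.Flow(
    nominal_value=100,            # installed capacity, e.g. 100 kW (assumed)
    variable_costs=0.01,          # dispatch price per kWh (assumed)
    min=0.2,                      # minimum load as a share of nominal_value
    max=1.0,                      # maximum load as a share of nominal_value
    nonconvex=solph.NonConvex(),  # binary on/off status enabling the min load
)
```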