From b1f532771d6553a9ab4983dd8338d6197e306da8 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 06:08:05 -1000
Subject: [PATCH 01/51] rename 'project' to 'GENERATION_PROJECT' in
 gen_inc_heat_rates.tab and bump version to enable data upgrade; also fix
 bugs in upgrade module

---
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 examples/ccs/inputs/switch_inputs_version.txt |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../1plant/inputs/switch_inputs_version.txt   |  2 +-
 .../3plants/inputs/switch_inputs_version.txt  |  2 +-
 .../4plants/inputs/switch_inputs_version.txt  |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/gen_inc_heat_rates.tab             |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../unit_commit/inputs/gen_inc_heat_rates.tab |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../inputs/switch_inputs_version.txt          |  2 +-
 .../storage/inputs/switch_inputs_version.txt  |  2 +-
 .../generators/core/commit/fuel_use.py        |  2 +-
 switch_model/upgrade/manager.py               | 39 ++++++++++-----
 switch_model/upgrade/upgrade_2_0_0b4.py       | 48 +++++++++++++++++++
 28 files changed, 100 insertions(+), 39 deletions(-)
 create mode 100644 switch_model/upgrade/upgrade_2_0_0b4.py

diff --git a/examples/3zone_toy/inputs/switch_inputs_version.txt b/examples/3zone_toy/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/3zone_toy/inputs/switch_inputs_version.txt
+++ b/examples/3zone_toy/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
+++ b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/carbon_cap/inputs/switch_inputs_version.txt b/examples/carbon_cap/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/carbon_cap/inputs/switch_inputs_version.txt
+++ b/examples/carbon_cap/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/ccs/inputs/switch_inputs_version.txt b/examples/ccs/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/ccs/inputs/switch_inputs_version.txt
+++ b/examples/ccs/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/copperplate0/inputs/switch_inputs_version.txt b/examples/copperplate0/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/copperplate0/inputs/switch_inputs_version.txt
+++ b/examples/copperplate0/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/copperplate1/inputs/switch_inputs_version.txt b/examples/copperplate1/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/copperplate1/inputs/switch_inputs_version.txt
+++ b/examples/copperplate1/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/custom_extension/inputs/switch_inputs_version.txt b/examples/custom_extension/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/custom_extension/inputs/switch_inputs_version.txt
+++ b/examples/custom_extension/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
+++ b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/discrete_build/inputs/switch_inputs_version.txt b/examples/discrete_build/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/discrete_build/inputs/switch_inputs_version.txt
+++ b/examples/discrete_build/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/dr_simple/inputs/switch_inputs_version.txt b/examples/dr_simple/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/dr_simple/inputs/switch_inputs_version.txt
+++ b/examples/dr_simple/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/hydro_simple/inputs/switch_inputs_version.txt b/examples/hydro_simple/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/hydro_simple/inputs/switch_inputs_version.txt
+++ b/examples/hydro_simple/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/hydro_system/inputs/switch_inputs_version.txt b/examples/hydro_system/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/hydro_system/inputs/switch_inputs_version.txt
+++ b/examples/hydro_system/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/new_builds_only/inputs/switch_inputs_version.txt b/examples/new_builds_only/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/new_builds_only/inputs/switch_inputs_version.txt
+++ b/examples/new_builds_only/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/planning_reserves/inputs/switch_inputs_version.txt b/examples/planning_reserves/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/planning_reserves/inputs/switch_inputs_version.txt
+++ b/examples/planning_reserves/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab
index c005d284c..a44733ea9 100644
--- a/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab
+++ b/examples/production_cost_models/spinning_reserves/inputs/gen_inc_heat_rates.tab
@@ -1,4 +1,4 @@
-project power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h
+GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h
 S-NG_CC 40 . . 269.4069
 S-NG_CC 40 100.0 6.684885 .
 S-NG_GT 0 . . 0.1039
diff --git a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab
index c005d284c..a44733ea9 100644
--- a/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab
+++ b/examples/production_cost_models/unit_commit/inputs/gen_inc_heat_rates.tab
@@ -1,4 +1,4 @@
-project power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h
+GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h
 S-NG_CC 40 . . 269.4069
 S-NG_CC 40 100.0 6.684885 .
 S-NG_GT 0 . . 0.1039
diff --git a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt
+++ b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/rps_simple/inputs/switch_inputs_version.txt b/examples/rps_simple/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/rps_simple/inputs/switch_inputs_version.txt
+++ b/examples/rps_simple/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/examples/storage/inputs/switch_inputs_version.txt b/examples/storage/inputs/switch_inputs_version.txt
index 0f3f84ca6..94789474f 100644
--- a/examples/storage/inputs/switch_inputs_version.txt
+++ b/examples/storage/inputs/switch_inputs_version.txt
@@ -1 +1 @@
-2.0.0b2
+2.0.0b4
diff --git a/switch_model/generators/core/commit/fuel_use.py b/switch_model/generators/core/commit/fuel_use.py
index 8c71f18d3..396c27288 100644
--- a/switch_model/generators/core/commit/fuel_use.py
+++ b/switch_model/generators/core/commit/fuel_use.py
@@ -183,7 +183,7 @@ def load_inputs(mod, switch_data, inputs_dir):
     path = os.path.join(inputs_dir, 'gen_inc_heat_rates.tab')
     if os.path.isfile(path):
         (fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file(
-            path, id_column="project")
+            path, id_column="GENERATION_PROJECT")
         # Check implied minimum loading level for consistency with
         # gen_min_load_fraction if gen_min_load_fraction was provided. If
         # gen_min_load_fraction wasn't provided, set it to implied minimum
diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py
index e7983b912..c9613c9d1 100644
--- a/switch_model/upgrade/manager.py
+++ b/switch_model/upgrade/manager.py
@@ -12,6 +12,7 @@
 
 import upgrade_2_0_0b1
 import upgrade_2_0_0b2
+import upgrade_2_0_0b4
 
 # Available upgrade code. This needs to be in consecutive order so
 # upgrade_inputs can incrementally apply the upgrades.
@@ -22,20 +23,27 @@
     (upgrade_2_0_0b2,
      upgrade_2_0_0b2.upgrades_from,
      upgrade_2_0_0b2.upgrades_to),
+    (upgrade_2_0_0b4,
+     upgrade_2_0_0b4.upgrades_from,
+     upgrade_2_0_0b4.upgrades_to),
 ]
+# Not every code revision requires an update, so we hard-code the last
+# revision that required an update.
+last_required_update = '2.0.0b4'
 
 code_version = StrictVersion(switch_model.__version__)
 version_file = 'switch_inputs_version.txt'
 
 #verbose = False
 verbose = True
 
-def scan_and_upgrade(top_dir, input_dir_name = 'inputs'):
+def scan_and_upgrade(top_dir, inputs_dir_name='inputs', backup=True):
     for dirpath, dirnames, filenames in os.walk(top_dir):
         for dirname in dirnames:
             path = os.path.join(dirpath, dirname)
-            if os.path.exists(os.path.join(path, input_dir_name, 'modules.txt')):
-                upgrade_inputs(os.path.join(path, input_dir_name), verbose)
+            if os.path.exists(os.path.join(path, inputs_dir_name, 'modules.txt')):
+                # print_verbose('upgrading {}'.format(os.path.join(path, inputs_dir_name)))
+                upgrade_inputs(os.path.join(path, inputs_dir_name), backup)
 
 
 def get_input_version(inputs_dir):
@@ -83,7 +91,6 @@ def do_inputs_need_upgrade(inputs_dir):
     # Not every code revision requires an update, so just hard-code the last
    # revision that required an update.
     inputs_version = get_input_version(inputs_dir)
-    last_required_update = '2.0.0b2'
     return StrictVersion(inputs_version) < StrictVersion(last_required_update)
 
 
@@ -118,31 +125,37 @@ def upgrade_inputs(inputs_dir, backup=True):
         upgrader.upgrade_input_dir(inputs_dir)
         print_verbose('\tFinished upgrading ' + inputs_dir + '\n')
     else:
-        print_verbose('Skipped ' + inputs_dir + ' it does not need upgrade.')
+        print_verbose('Skipped ' + inputs_dir + '; it does not need upgrade.')
 
 
 def main(args=None):
     if args is None:
+        # note: we don't pass the args object directly to scan_and_upgrade or upgrade_inputs
+        # because those may be called from elsewhere with custom arguments
        parser = argparse.ArgumentParser()
         add_parser_args(parser)
         args = parser.parse_args()
     set_verbose(args.verbose)
-    if args.recusive:
-        scan_and_upgrade(args.path)
+    if args.recursive:
+        scan_and_upgrade('.', args.inputs_dir_name, args.backup)
     else:
-        if not os.path.isdir(args.path):
-            print("Error: Input directory {} does not exist.".format(args.path))
+        if not os.path.isdir(args.inputs_dir_name):
+            print("Error: Input directory {} does not exist.".format(args.inputs_dir_name))
             return -1
-        upgrade_inputs(os.path.normpath(args.path))
+        upgrade_inputs(os.path.normpath(args.inputs_dir_name), args.backup)
 
 
 def set_verbose(verbosity):
     global verbose
     verbose = verbosity
 
 
 def add_parser_args(parser):
-    parser.add_argument("--path", type=str, default="inputs",
-        help='Input directory path (default is "inputs")')
-    parser.add_argument("--recursive", dest="recusive",
+    parser.add_argument("--inputs-dir-name", type=str, default="inputs",
+        help='Input directory name (default is "inputs")')
+    parser.add_argument("--backup", action='store_true', default=True,
+        help='Make backup of inputs directory before upgrading (set true by default)')
+    parser.add_argument("--no-backup", action='store_false', dest='backup',
+        help='Do not make backup of inputs directory before upgrading')
+    parser.add_argument("--recursive", dest="recursive", default=False,
         action='store_true', help=('Recursively scan the provided path for inputs directories '
         'named "inputs", and upgrade each directory found. Note, this '
diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py
new file mode 100644
index 000000000..f65102fbe
--- /dev/null
+++ b/switch_model/upgrade/upgrade_2_0_0b4.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2015-2017 The Switch Authors. All rights reserved.
+# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
+
+"""
+Upgrade input directories from 2.0.0b2 to 2.0.0b4. (There were no changes for 2.0.0b3)
+Changes are:
+* rename 'project' column to 'GENERATION_PROJECT' in 'gen_inc_heat_rates.tab' file.
+"""
+
+import os, shutil, argparse
+import pandas
+import switch_model.upgrade
+
+upgrades_from = '2.0.0b2'
+upgrades_to = '2.0.0b4'
+
+def upgrade_input_dir(inputs_dir):
+    """
+    Upgrade an input directory.
+    """
+
+    def rename_file(old_name, new_name, optional_file=True):
+        old_path = os.path.join(inputs_dir, old_name)
+        new_path = os.path.join(inputs_dir, new_name)
+        if optional_file and not os.path.isfile(old_path):
+            return
+        shutil.move(old_path, new_path)
+
+    def rename_column(file_name, old_col_name, new_col_name, optional_file=True):
+        path = os.path.join(inputs_dir, file_name)
+        if optional_file and not os.path.isfile(path):
+            return
+        df = pandas.read_csv(path, na_values=['.'], sep='\t')
+        df.rename(columns={old_col_name: new_col_name}, inplace=True)
+        df.to_csv(path, sep='\t', na_rep='.', index=False)
+
+    old_new_column_names_in_file = {
+        'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')]
+    }
+
+    for fname, old_new_pairs in old_new_column_names_in_file.iteritems():
+        for old_new_pair in old_new_pairs:
+            old = old_new_pair[0]
+            new = old_new_pair[1]
+            rename_column(fname, old_col_name=old, new_col_name=new)
+
+    # Write a new version text file.
+    switch_model.upgrade._write_input_version(inputs_dir, upgrades_to)

From 0cf919b308ed13538994944c83b1f8cff023223d Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 12:50:54 -1000
Subject: [PATCH 02/51] fix regression in definition of GEN_PERIODS

---
 switch_model/generators/core/build.py | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py
index 8fcbfa7d0..cb052f98f 100644
--- a/switch_model/generators/core/build.py
+++ b/switch_model/generators/core/build.py
@@ -327,11 +327,22 @@ def BuildGen_assign_default_value(m, g, bld_yr):
         mod.PREDETERMINED_GEN_BLD_YRS,
         rule=BuildGen_assign_default_value)
 
+    # note: in pull request 78, commit e7f870d..., GEN_PERIODS
+    # was mistakenly redefined as GENERATION_PROJECTS * PERIODS.
+    # That didn't directly affect the objective function in the tests
+    # because most code uses GEN_TPS, which was defined correctly.
+    # But it did have some subtle effects on the main Hawaii model.
+    # It would be good to have a test that this set is correct,
+    # e.g., assertions that in the 3zone_toy model,
+    # ('C-Coal_ST', 2020) in m.GEN_PERIODS and ('C-Coal_ST', 2030) not in m.GEN_PERIODS
+    # and 'C-Coal_ST' in m.GENS_IN_PERIOD[2020] and 'C-Coal_ST' not in m.GENS_IN_PERIOD[2030]
     mod.GEN_PERIODS = Set(
         dimen=2,
-        initialize=mod.GENERATION_PROJECTS * mod.PERIODS)
+        initialize=lambda m:
+            [(g, p) for g in m.GENERATION_PROJECTS for p in m.PERIODS_FOR_GEN[g]])
+
     mod.GenCapacity = Expression(
-        mod.GEN_PERIODS,
+        mod.GENERATION_PROJECTS, mod.PERIODS,
         rule=lambda m, g, period: sum(
             m.BuildGen[g, bld_yr]
             for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, period]))
@@ -393,12 +404,12 @@ def BuildGen_assign_default_value(m, g, bld_yr):
             crf(m.interest_rate, m.gen_max_age[g])))
 
     mod.GenCapitalCosts = Expression(
-        mod.GEN_PERIODS,
+        mod.GENERATION_PROJECTS, mod.PERIODS,
         rule=lambda m, g, p: sum(
             m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr]
            for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p]))
     mod.GenFixedOMCosts = Expression(
-        mod.GEN_PERIODS,
+        mod.GENERATION_PROJECTS, mod.PERIODS,
         rule=lambda m, g, p: sum(
             m.BuildGen[g, bld_yr] * m.gen_fixed_om[g, bld_yr]
             for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, p]))
@@ -494,12 +505,10 @@ def load_inputs(mod, switch_data, inputs_dir):
 
 
 def post_solve(instance, outdir):
-    # write_table returns a tuple instead of expanding the indexes, so use
-    # "gp" for the tuple instead of "g, p" for the components.
     write_table(
-        instance, instance.GEN_PERIODS,
+        instance, instance.GENERATION_PROJECTS, instance.PERIODS,
         output_file=os.path.join(outdir, "gen_cap.txt"),
         headings=("GENERATION_PROJECT", "PERIOD",
             "GenCapacity", "GenCapitalCosts", "GenFixedOMCosts"),
-        values=lambda m, gp: gp + (m.GenCapacity[gp], m.GenCapitalCosts[gp],
-            m.GenFixedOMCosts[gp]))
+        values=lambda m, g, p: (g, p, m.GenCapacity[g, p], m.GenCapitalCosts[g, p],
+            m.GenFixedOMCosts[g, p]))

From 32efce45d4cc07501d6d89b6e09c9999db24aa8f Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 12:58:28 -1000
Subject: [PATCH 03/51] bump switch version number to match data version

---
 switch_model/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/switch_model/version.py b/switch_model/version.py
index 228d73dad..b48ca99f7 100644
--- a/switch_model/version.py
+++ b/switch_model/version.py
@@ -6,4 +6,4 @@
 installed and executed in environments that don't have any dependencies
 installed.
 """
-__version__='2.0.0b3'
\ No newline at end of file
+__version__='2.0.0b4'
\ No newline at end of file

From 90d88a877a0b19d8a59e0b9614994e6e0f98b155 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 13:08:00 -1000
Subject: [PATCH 04/51] minor bug fixes and enhancements

---
 switch_model/balancing/demand_response/simple.py | 4 ++--
 switch_model/generators/core/build.py            | 7 ++++++-
 switch_model/solve.py                            | 2 +-
 switch_model/solve_scenarios.py                  | 2 +-
 4 files changed, 10 insertions(+), 5 deletions(-)
 mode change 100644 => 100755 switch_model/solve.py
 mode change 100644 => 100755 switch_model/solve_scenarios.py

diff --git a/switch_model/balancing/demand_response/simple.py b/switch_model/balancing/demand_response/simple.py
index b9ae78bb7..1595df72c 100644
--- a/switch_model/balancing/demand_response/simple.py
+++ b/switch_model/balancing/demand_response/simple.py
@@ -71,9 +71,9 @@ def define_components(mod):
         rule=lambda m, z, ts:
         sum(m.ShiftDemand[z, t] for t in m.TPS_IN_TS[ts]) == 0.0)
 
-    if 'Distributed_Power_Withdrawals' in dir(mod):
+    try:
         mod.Distributed_Power_Withdrawals.append('ShiftDemand')
-    else:
+    except AttributeError:
         mod.Zone_Power_Withdrawals.append('ShiftDemand')
 
 
diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py
index cb052f98f..2d11e4b02 100644
--- a/switch_model/generators/core/build.py
+++ b/switch_model/generators/core/build.py
@@ -242,7 +242,7 @@ def define_components(mod):
         filter=lambda m, g: m.gen_uses_fuel[g])
     mod.gen_full_load_heat_rate = Param(
         mod.FUEL_BASED_GENS,
-        within=PositiveReals)
+        within=NonNegativeReals)
     mod.MULTIFUEL_GENS = Set(
         initialize=mod.GENERATION_PROJECTS,
         filter=lambda m, g: m.gen_energy_source[g] == "multiple")
@@ -298,6 +298,11 @@ def _gen_build_can_operate_in_period(m, g, build_year, period):
             bld_yr for (gen, bld_yr) in m.GEN_BLD_YRS
             if gen == g and
                _gen_build_can_operate_in_period(m, g, bld_yr, period)))
+    # The set of periods when a generator is available to run
+    mod.PERIODS_FOR_GEN = Set(
+        mod.GENERATION_PROJECTS,
+        initialize=lambda m, g: [p for p in m.PERIODS if len(m.BLD_YRS_FOR_GEN_PERIOD[g, p]) > 0]
+    )
 
     def bounds_BuildGen(model, g, bld_yr):
         if((g, bld_yr) in model.PREDETERMINED_GEN_BLD_YRS):
diff --git a/switch_model/solve.py b/switch_model/solve.py
old mode 100644
new mode 100755
index 59c8d8479..235221030
--- a/switch_model/solve.py
+++ b/switch_model/solve.py
@@ -569,7 +569,7 @@ def solve(model):
         model.solutions.load_from(results)
 
     # Only return if the model solved correctly, otherwise throw a useful error
-    if(results.solver.status == SolverStatus.ok and
+    if(results.solver.status in {SolverStatus.ok, SolverStatus.warning} and
        results.solver.termination_condition == TerminationCondition.optimal):
         return results
    elif (results.solver.termination_condition == TerminationCondition.infeasible):
diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py
old mode 100644
new mode 100755
index f2ea718b4..adbb44f8c
--- a/switch_model/solve_scenarios.py
+++ b/switch_model/solve_scenarios.py
@@ -269,7 +269,7 @@ def unlock_running_scenarios():
         interrupted = f.read().splitlines()
     for scenario_name in interrupted:
         try:
-            os.rmdir(scenario_name)
+            os.rmdir(os.path.join(scenario_queue_dir, scenario_name))
         except OSError as e:
             if e.errno != 2:  # no such file
                 raise

From 35a79e4a6c594671e0f6d296679d78386e659525 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 13:11:01 -1000
Subject: [PATCH 05/51] unpack tuples in indexes in reporting.write_table(),
 similar to Pyomo

---
 switch_model/generators/extensions/storage.py |  4 +-
 switch_model/reporting/__init__.py            | 45 ++++++++++++++++---
 2 files changed, 40 insertions(+), 9 deletions(-)

diff --git a/switch_model/generators/extensions/storage.py b/switch_model/generators/extensions/storage.py
index af67230fc..6d319461b 100644
--- a/switch_model/generators/extensions/storage.py
+++ b/switch_model/generators/extensions/storage.py
@@ -220,7 +220,7 @@ def post_solve(instance, outdir):
         headings=("project", "period", "load_zone", "IncrementalPowerCapacityMW",
             "IncrementalEnergyCapacityMWh", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh" ),
-        values=lambda m, (g, bld_yr): (
+        values=lambda m, g, bld_yr: (
             g, bld_yr, m.gen_load_zone[g],
             m.BuildGen[g, bld_yr], m.BuildStorageEnergy[g, bld_yr],
             m.GenCapacity[g, bld_yr], m.StorageEnergyCapacity[g, bld_yr]
@@ -230,7 +230,7 @@ def post_solve(instance, outdir):
         output_file=os.path.join(outdir, "storage_dispatch.txt"),
         headings=("project", "timepoint", "load_zone",
             "ChargeMW", "DischargeMW", "StateOfCharge"),
-        values=lambda m, (g, t): (
+        values=lambda m, g, t: (
             g, m.tp_timestamp[t], m.gen_load_zone[g],
             m.ChargeStorage[g, t], m.DispatchGen[g, t],
             m.StateOfCharge[g, t]
diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py
index 6e55aec77..0aa9a84b5 100644
--- a/switch_model/reporting/__init__.py
+++ b/switch_model/reporting/__init__.py
@@ -49,9 +49,7 @@ def write_table(instance, *indexes, **kwargs):
     headings = kwargs["headings"]
     values = kwargs["values"]
     digits = kwargs.get('digits', 6)
-    # create a master indexing set
-    # this is a list of lists, even if only one list was specified
-    idx = itertools.product(*indexes)
+
     with open(output_file, 'wb') as f:
         w = csv.writer(f, dialect="ampl-tab")
         # write header row
@@ -67,10 +65,43 @@ def format_row(row):
             else:
                 row[i] = sig_digits.format(v)
         return tuple(row)
-        w.writerows(
-            format_row(row=values(instance, *x))
-            for x in idx
-        )
+
+        try:
+            w.writerows(
+                format_row(row=values(instance, *unpack_elements(x)))
+                for x in itertools.product(*indexes)
+            )
+        except TypeError: # lambda got wrong number of arguments
+            # use old code, which doesn't unpack the indices
+            w.writerows(
+                # TODO: flatten x (unpack tuples) like Pyomo before calling values()
+                # That may cause problems elsewhere though...
+
+                format_row(row=values(instance, *x))
+                for x in itertools.product(*indexes)
+            )
+            print "DEPRECATION WARNING: switch_model.reporting.write_table() was called with a function"
+            print "that expects multidimensional index values to be stored in tuples, but Switch now unpacks"
+            print "these tuples automatically. Please update your code to work with unpacked index values."
+            print "Problem occurred with {}.".format(values.func_code)
+
+def unpack_elements(it):
+    """Unpack any multi-element objects within it, to make a single flat list.
+    Note: this is not recursive.
+    This is used to flatten the product of a multi-dimensional index with anything else."""
+    l=[]
+    for t in it:
+        if isinstance(t, basestring):
+            l.append(t)
+        else:
+            try:
+                # check if it's iterable
+                iterator = iter(t)
+                for i in iterator:
+                    l.append(i)
+            except TypeError:
+                l.append(t)
+    return l
 
 
 def make_iterable(item):

From affa2ae629351c08e3bc1ea4c6dd06928c426659 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 13:30:10 -1000
Subject: [PATCH 06/51] allow zero values for transmission capacity and other
 transmission parameters

---
 switch_model/transmission/transport/build.py | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/switch_model/transmission/transport/build.py b/switch_model/transmission/transport/build.py
index 84aee96a7..88ed5d14f 100644
--- a/switch_model/transmission/transport/build.py
+++ b/switch_model/transmission/transport/build.py
@@ -176,11 +176,10 @@ def define_components(mod):
     mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
     mod.min_data_check('TRANSMISSION_LINES', 'trans_lz1', 'trans_lz2')
     mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx)
-    mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=PositiveReals)
+    mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals)
     mod.trans_efficiency = Param(
         mod.TRANSMISSION_LINES,
-        within=PositiveReals,
-        validate=lambda m, val, tx: val <= 1)
+        within=PercentFraction)
     mod.BLD_YRS_FOR_EXISTING_TX = Set(
         dimen=2,
         initialize=lambda m: set(
@@ -225,9 +224,8 @@ def bounds_BuildTx(model, tx, bld_yr):
             if tx2 == tx and (bld_yr == 'Legacy' or bld_yr <= period)))
     mod.trans_derating_factor = Param(
         mod.TRANSMISSION_LINES,
-        within=NonNegativeReals,
-        default=1,
-        validate=lambda m, val, tx: val <= 1)
+        within=PercentFraction,
+        default=1)
     mod.TxCapacityNameplateAvailable = Expression(
         mod.TRANSMISSION_LINES, mod.PERIODS,
         rule=lambda m, tx, period: (
@@ -238,13 +236,13 @@ def bounds_BuildTx(model, tx, bld_yr):
         default=1,
         validate=lambda m, val, tx: val >= 0.5 and val <= 3)
     mod.trans_capital_cost_per_mw_km = Param(
-        within=PositiveReals,
+        within=NonNegativeReals,
         default=1000)
     mod.trans_lifetime_yrs = Param(
-        within=PositiveReals,
+        within=NonNegativeReals,
         default=20)
     mod.trans_fixed_o_m_fraction = Param(
-        within=PositiveReals,
+        within=NonNegativeReals,
         default=0.03)
     # Total annual fixed costs for building new transmission lines...
     # Multiply capital costs by capital recover factor to get annual
     # overnight costs.
     mod.trans_cost_annual = Param(
         mod.TRANSMISSION_LINES,
-        within=PositiveReals,
+        within=NonNegativeReals,
         initialize=lambda m, tx: (
             m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] *
             m.trans_length_km[tx] * (crf(m.interest_rate, m.trans_lifetime_yrs) +

From c42825412ef7641bbed62ca7fb1d8e3c6ae640e4 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 3 Jan 2018 14:21:28 -1000
Subject: [PATCH 07/51] change hawaii modules to match 2016-12-23 PSIP; fix
 renaming errors

---
 switch_model/hawaii/fed_subsidies.py     |   6 +-
 switch_model/hawaii/lng_conversion.py    |   8 +-
 .../hawaii/{psip.py => psip_2016_04.py}  |   0
 switch_model/hawaii/psip_2016_12.py      | 337 ++++++++++++++++++
 switch_model/hawaii/reserves.py          |  24 +-
 switch_model/hawaii/save_results.py      | 291 ++++++++-------
 switch_model/hawaii/scenario_data.py     |  28 +-
 switch_model/hawaii/unserved_load.py     |   3 +-
 8 files changed, 547 insertions(+), 150 deletions(-)
 rename switch_model/hawaii/{psip.py => psip_2016_04.py} (100%)
 create mode 100644 switch_model/hawaii/psip_2016_12.py

diff --git a/switch_model/hawaii/fed_subsidies.py b/switch_model/hawaii/fed_subsidies.py
index 42f0a3df8..ccb8454ba 100644
--- a/switch_model/hawaii/fed_subsidies.py
+++ b/switch_model/hawaii/fed_subsidies.py
@@ -18,7 +18,7 @@ def define_components(m):
 
     # note: here we assume that existing projects and new (unbuilt) projects
     # are defined separately
-    m.NEW_GENECTS = Set(initialize=lambda m: set(p for (p, y) in m.NEW_GEN_BLD_YRS))
+    m.NEW_GENS = Set(initialize=lambda m: set(p for (p, y) in m.NEW_GEN_BLD_YRS))
 
     # model the wind production tax credit
     m.Wind_Subsidy_Hourly = Expression(
@@ -26,7 +26,7 @@ def define_components(m):
         rule=lambda m, t: -wind_prod_tax_credit * sum(
             m.DispatchGen[p, t]
             for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[wind_energy_source]
-            if p in m.NEW_GENECTS and (p, t) in m.GEN_TPS
+            if p in m.NEW_GENS and (p, t) in m.GEN_TPS
         )
     )
     m.Cost_Components_Per_TP.append('Wind_Subsidy_Hourly')
@@ -36,7 +36,7 @@ def define_components(m):
         -solar_invest_tax_credit * sum(
             m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr]
             for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[solar_energy_source]
-            if g in m.NEW_GENECTS
+            if g in m.NEW_GENS
             for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe]))
     # # another version:
     # m.Solar_Credit_Annual = Expression(m.PERIODS, rule=lambda m, pe:
diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py
index 1146b11f3..2ad6e5a61 100644
--- a/switch_model/hawaii/lng_conversion.py
+++ b/switch_model/hawaii/lng_conversion.py
@@ -104,7 +104,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier):
 
 
     # list of all projects and timepoints when LNG could potentially be used
-    m.LNG_GENECT_TIMEPOINTS = Set(dimen=2, initialize = lambda m:
+    m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m:
         ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS
             if (p, t) in m.GEN_TPS)
     )
@@ -125,7 +125,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier):
             'Oahu_CC_383', 'Oahu_CC_152', 'Oahu_CT_100'
         ]
     )
-    m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GENECT_TIMEPOINTS,
+    m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS,
         rule=lambda m, g, tp:
             Constraint.Skip if g in m.LNG_CONVERTED_PLANTS
            else (m.GenFuelUseRate[g, tp, 'LNG'] == 0)
@@ -195,7 +195,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier):
     #     lng_market_exhausted = 1 - m.LNG_Has_Slack[rfm, m.tp_period[tp]]
     #     return (non_lng_fuel <= big_gect_lng * lng_market_exhausted)
     # m.Only_LNG_In_Converted_Plants = Constraint(
-    #     m.LNG_GENECT_TIMEPOINTS,
+    #     m.LNG_GEN_TIMEPOINTS,
     #     rule=Only_LNG_In_Converted_Plants_rule
     # )
@@ -226,7 +226,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier):
     #     )
     #     return rule
     # m.Force_Converted_Plants_On = Constraint(
-    #     m.LNG_GENECT_TIMEPOINTS,
+    #     m.LNG_GEN_TIMEPOINTS,
     #     rule=Force_Converted_Plants_On_rule
     # )
diff --git a/switch_model/hawaii/psip.py b/switch_model/hawaii/psip_2016_04.py
similarity index 100%
rename from switch_model/hawaii/psip.py
rename to switch_model/hawaii/psip_2016_04.py
diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py
new file mode 100644
index 000000000..2f0eafbaa
--- /dev/null
+++ b/switch_model/hawaii/psip_2016_12.py
@@ -0,0 +1,337 @@
+from __future__ import division
+import os
+from pyomo.environ import *
+
+def define_arguments(argparser):
+    argparser.add_argument('--psip-force', action='store_true', default=True,
+        help="Force following of PSIP plans (retiring AES and building certain technologies).")
+    argparser.add_argument('--psip-relax', dest='psip_force', action='store_false',
+        help="Relax PSIP plans, to find a more optimal strategy.")
+    argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False,
+        help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).")
+    argparser.add_argument('--force-build', nargs=3, default=None,
+        help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.")
+
+def define_components(m):
+    ###################
+    # resource rules to match HECO's 2016-12-23 PSIP
+    ##################
+
+    # decide whether to enforce the PSIP preferred plan
+    # if an environment variable is set, that takes precedence
+    # (e.g., on a cluster to override options.txt)
+    psip_env_var = os.environ.get('USE_PSIP_PLAN')
+    if psip_env_var is None:
+        # no environment variable; use the --psip-relax flag
+        psip = m.options.psip_force
+    elif psip_env_var.lower() in ["1", "true", "y", "yes", "on"]:
+        psip = True
+    elif psip_env_var.lower() in ["0", "false", "n", "no", "off"]:
+        psip = False
+    else:
+        raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var))
+
+    if psip:
+        print "Using PSIP construction plan."
+    else:
+        print "Relaxing PSIP construction plan."
+
+    # don't allow addition of anything other than those specified here
+    # force retirement of AES at end of 2022
+
+    # these plants are all multi-fuel; will automatically convert to biodiesel in 2045:
+    # CIP CT-1, W9, W10, Airport DSG, Schofield, IC_Barge, IC_MCBH, Kalaeloa
+
+    # no use of LNG
+
+    # force battery installations directly (since they're not currently a standard tech)
+
+    # NOTE: RESOLVE used different wind and solar profiles from SWITCH.
+    # SWITCH profiles seem to be more accurate, so we optimize against them
+    # and show that this may give (small) savings vs. the RESOLVE plan.
+
+    # targets for individual generation technologies
+    # (year, technology, MW added)
+    # TODO: allow either CentralFixedPV or CentralTrackingPV for utility-scale solar
+    # (not urgent now, since CentralFixedPV is not currently in the model)
+
+    def annual(start_year, end_year, start_amount, end_amount):
+        # should be applied to each year including end year, but not start year
+        return (end_amount-start_amount)/(end_year-start_year)
+
+    # technologies that are definitely being built (we assume near-term
+    # projects are underway and military projects are being built for their
+    # own reasons)
+    technology_targets_definite = [
+        (2016, 'CentralTrackingPV', 27.6),  # Waianae Solar by Eurus Energy America
+        (2018, 'IC_Schofield', 54.0),
+
+        # Distributed PV from Table J-1 of PSIP
+        # TODO: check that this matches Resolve inputs
+        # This is treated as definite, so we don't get caught up in "you could save
+        # a little money by building Central PV instead of distributed." Probably
+        # appropriate, since it's a forecast, not a decision anyway.
+        (2016, 'DistPV', 471 - 444),  # net of 444 MW of pre-existing DistPV, also counted in 2016
+
+        (2017, 'DistPV', annual(2016, 2020, 471, 856)),
+        (2018, 'DistPV', annual(2016, 2020, 471, 856)),
+        (2019, 'DistPV', annual(2016, 2020, 471, 856)),
+        (2020, 'DistPV', annual(2016, 2020, 471, 856)),
+
+        (2021, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2022, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2023, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2024, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2025, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2026, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2027, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2028, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2029, 'DistPV', annual(2020, 2030, 856, 1169)),
+        (2030, 'DistPV', annual(2020, 2030, 856, 1169)),
+
+        (2031, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2032, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2033, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2034, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2035, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2036, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2037, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2038, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2039, 'DistPV', annual(2030, 2040, 1169, 1517)),
+        (2040, 'DistPV', annual(2030, 2040, 1169, 1517)),
+
+        (2041, 'DistPV', annual(2040, 2045, 1517, 1697)),
+        (2042, 'DistPV', annual(2040, 2045, 1517, 1697)),
+        (2043, 'DistPV', annual(2040, 2045, 1517, 1697)),
+        (2044, 'DistPV', annual(2040, 2045, 1517, 1697)),
+        (2045, 'DistPV', annual(2040, 2045, 1517, 1697)),
+        # replace prebuilt capacity (counted in 2016, so retired in 2041)
+        (2041, 'DistPV', 444),
+        # replace PSIP capacity built before 2020, which was counted in 2020 (retires in 2045)
+        (2045, 'DistPV', 856-444),
+    ]
+    # technologies proposed in PSIP but which may not be built if a
+    # better plan is found
+    technology_targets_psip = [
+        (2018, 'OnshoreWind', 24),  # Na Pua Makani (NPM) wind
+        (2018, 'CentralTrackingPV', 109.6),  # replacement for canceled SunEdison projects
+        (2018, 'OnshoreWind', 10),  # CBRE wind
+        (2018, 'CentralTrackingPV', 15),  # CBRE PV
+        (2019, 'CentralTrackingPV', 20),  # West Loch PV
+        (2020, 'CentralTrackingPV', 180),
+        (2022, 'CentralTrackingPV', 40),
+        (2022, 'IC_Barge', 100.0),  # JBPHH plant
+        # note: we moved IC_MCBH one year earlier than PSIP to reduce infeasibility in 2022
+        (2022, 'IC_MCBH', 54.0),
+        (2025, 'CentralTrackingPV', 200),
+        (2025, 'OffshoreWind', 200),
+        (2040, 'CentralTrackingPV', 280),
+        (2045, 'CentralTrackingPV', 1180),
+        (2045, 'IC_MCBH', 68.0),  # proxy for 68 MW of generic ICE capacity
+        # restrict construction of batteries
+        (2022, 'LoadShiftBattery', 426),
+        (2025, 'LoadShiftBattery', 29),
+        (2030, 'LoadShiftBattery', 165),
+        (2035, 'LoadShiftBattery', 168),
+        (2040, 'LoadShiftBattery', 420),
+        (2045, 'LoadShiftBattery', 1525),
+    ]
+
+    if m.options.force_build is not None:
+        b = list(m.options.force_build)
+        b[0] = int(b[0])    # year
+        b[2] = float(b[2])  # quantity
+        b = tuple(b)
+        print "Forcing build: {}".format(b)
+        technology_targets_definite.append(b)
+
+    # make sure LNG is turned off
+    if psip and getattr(m.options, "force_lng_tier", []) != ["none"]:
+        raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.')
+
+    if psip:
+        technology_targets = technology_targets_definite + technology_targets_psip
+    else:
+        technology_targets = technology_targets_definite
+
+    # make a special list including all standard generation technologies plus "LoadShiftBattery"
+    m.GEN_TECHS_AND_BATTERIES = Set(initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + ["LoadShiftBattery"])
+
+    def technology_target_init(m, per, tech):
+        """Find the amount of each technology that is targeted to be built by the start of each period."""
+        start = 2000 if per == m.PERIODS.first() else m.PERIODS.prev(per)
+        end = per
+        target = sum(
+            mw for (tyear, ttech, mw) in technology_targets
+            if ttech == tech and start < tyear and tyear <= end
+        )
+        return target
+    m.technology_target = Param(m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init)
+
+    # with PSIP: BuildGen is zero except for technology_targets
+    # (sum during each period or before first period)
+    # without PSIP: BuildGen is >= definite targets
+    def Enforce_Technology_Target_rule(m, per, tech):
+        """Enforce targets for each technology; exact target for PSIP cases, minimum target for non-PSIP."""
+
+        def adjust_psip_credit(g, target):
+            if g in m.DISCRETELY_SIZED_GENS and target > 0.0:
+                # Rescale so that the n integral units that come closest
+                # to the target gets counted as the n.n fractional units
+                # needed to exactly meet the target.
+                # This is needed because some of the targets are based on
+                # nominal unit sizes rather than actual max output.
+                return (target / m.gen_unit_size[g]) / round(target / m.gen_unit_size[g])
+            else:
+                return 1.0
+
+        target = m.technology_target[per, tech]
+
+        if tech == "LoadShiftBattery":
+            # special treatment for batteries, which are not a standard technology
+            if hasattr(m, 'BuildBattery'):
+                build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES)
+            else:
+                build = 0
+        else:
+            build = sum(
+                m.BuildGen[g, per] * adjust_psip_credit(g, target)
+                for g in m.GENERATION_PROJECTS
+                if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS
+            )
+
+        if type(build) is int and build == 0:
+            # no matching projects found
+            if target == 0:
+                return Constraint.Skip
+            else:
+                print(
+                    "WARNING: target was set for {} in {}, but no matching projects are available. "
+                    "Model will be infeasible.".format(tech, per)
+                )
+                return Constraint.Infeasible
+        elif psip:
+            return (build == target)
+        elif m.options.psip_minimal_renewables and any(txt in tech for txt in ["PV", "Wind", "Solar"]):
+            # only build the specified amount of renewables, no more
+            return (build == target)
+        else:
+            # treat the target as a lower bound
+            return (build >= target)
+    m.Enforce_Technology_Target = Constraint(
+        m.PERIODS, m.GEN_TECHS_AND_BATTERIES, rule=Enforce_Technology_Target_rule
+    )
+
+    aes_g = 'Oahu_AES'
+    aes_size = 180
+    aes_bld_year = 1992
+    m.AES_OPERABLE_PERIODS = Set(initialize = lambda m:
+        m.PERIODS_FOR_GEN_BLD_YR[aes_g, aes_bld_year]
+    )
+    m.OperateAES = Var(m.AES_OPERABLE_PERIODS, within=Binary)
+    m.Enforce_AES_Deactivate = Constraint(m.TIMEPOINTS, rule=lambda m, tp:
+        Constraint.Skip if (aes_g, tp) not in m.GEN_TPS
+        else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size)
+    )
+    m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per:
+        0.0 if per not in m.AES_OPERABLE_PERIODS
+        else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year]
+    )
+    m.Cost_Components_Per_Period.append('AESDeactivateFixedCost')
+
+    if psip:
+        # keep AES active until 9/2022; deactivate after that
+        # note: since a period starts in 2022, we retire before that
+        m.PSIP_Retire_AES = Constraint(m.AES_OPERABLE_PERIODS, rule=lambda m, per:
+            (m.OperateAES[per] == 1) if per + m.period_length_years[per] <= 2022
+            else (m.OperateAES[per] == 0)
+        )
+
+        # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels
+        # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG
+
+        # no biodiesel before 2040 (then phased in fast enough to meet the RPS)
+        m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [
+            (rfm, per)
+            for per in m.PERIODS if per + m.period_length_years[per] <= 2040
+            for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel'
+        ])
+        m.NoEarlyBiodiesel = Constraint(m.EARLY_BIODIESEL_MARKETS, rule=lambda m, rfm, per:
+            m.FuelConsumptionInMarket[rfm, per] == 0
+        )
+
+        # # 100-300 GWh of non-LNG fuels in 2021-2040 (based on 2016-04 PSIP fig. 5-5)
+        # # Note: this is needed because we assume HECO plans to burn LNG in the future
+        # # even in scenarios where it costs more than oil.
+        # m.PSIP_HIGH_LNG_PERIODS = Set(initialize=lambda m:
+        #     [per for per in m.PERIODS if per + m.period_length_years[per] > 2021 and per < 2045]
+        # )
+        # m.OilProductionGWhPerYear = Expression(m.PERIODS, rule=lambda m, per:
+        #     sum(
+        #         m.DispatchGenByFuel[g, tp, f] * m.tp_weight_in_year[tp] * 0.001 # convert from MWh to GWh
+        #         for f in ['Diesel', 'LSFO', 'LSFO-Diesel-Blend']
+        #         for g in m.GENERATION_PROJECTS_BY_FUEL[f]
+        #         for tp in m.TPS_IN_PERIOD[per] if (g, tp) in m.GEN_TPS
+        #     )
+        # )
+        # m.Upper_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per:
+        #     (m.OilProductionGWhPerYear[per] <= 300)
+        #     if per + 0.5 * m.period_length_years[per] >= 2021
+        #     else
+        #         Constraint.Skip
+        # )
+        # # lower limit is in place to roughly reflect HECO's plan
+        # m.Lower_Limit_Oil_Power = Constraint(m.PERIODS, rule=lambda m, per:
+        #     (m.OilProductionGWhPerYear[per] >= 100)
+        #     if per + m.period_length_years[per] < 2040 # relax constraint if period ends after 2040
+        #     else
+        #         Constraint.Skip
+        # )
+
+        # force LNG conversion in 2021 (modeled on similar constraint in lng_conversion.py)
+        # This could have extra code to skip the constraint if there are no periods after 2021,
+        # but it is unlikely ever to be run that way.
+        # Note: this is not needed if some plants are forced to run on LNG
+        # NOTE: this is no longer used; use '--force-lng-tier container' instead
+        # m.PSIP_Force_LNG_Conversion = Constraint(m.LOAD_ZONES, rule=lambda m, z:
+        #     m.ConvertToLNG[
+        #         z,
+        #         min(per for per in m.PERIODS if per + m.period_length_years[per] > 2021)
+        #     ] == 1
+        # )
+
+        # # Kahe 5, Kahe 6, Kalaeloa and CC_383 only burn LNG after 2021
+        # # This is not used because it creates a weird situation where HECO runs less-efficient non-LNG
+        # # plants instead of more efficient LNG-capable plants on oil.
+        # # there may be a faster way to build this, but it's not clear what
+        # m.PSIP_Force_LNG_Use = Constraint(m.GEN_TP_FUELS, rule=lambda m, g, tp, fuel:
+        #     (m.GenFuelUseRate[g, tp, fuel] == 0)
+        #     if g in m.LNG_CONVERTED_PLANTS
+        #         and fuel != 'LNG'
+        #         and m.tp_period[tp] + m.period_length_years[m.tp_period[tp]] > 2021
+        #     else
+        #         Constraint.Skip
+        # )
+
+        # don't allow construction of other technologies (e.g., pumped hydro, fuel cells)
+        advanced_tech_vars = [
+            "BuildPumpedHydroMW", "BuildAnyPumpedHydro",
+            "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg",
+            "BuildFuelCellMW",
+        ]
+        def no_advanced_tech_rule_factory(v):
+            return lambda m, *k: (getattr(m, v)[k] == 0)
+        for v in advanced_tech_vars:
+            try:
+                var = getattr(m, v)
+                setattr(m, "PSIP_No_"+v, Constraint(var._index, rule=no_advanced_tech_rule_factory(v)))
+            except AttributeError:
+                pass  # model doesn't have this var
+
+        # # don't allow any changes to the fuel market, including bulk LNG
+        # # not used now; use "--force-lng-tier container" instead
+        # m.PSIP_Deactivate_Limited_RFM_Supply_Tiers = Constraint(m.RFM_SUPPLY_TIERS,
+        #     rule=lambda m, r, p, st:
+        #         Constraint.Skip if (m.rfm_supply_tier_limit[r, p, st] == float('inf'))
+        #         else (m.RFMSupplyTierActivate[r, p, st] == 0)
+        # )
diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py
index c9b5946d8..930e0917a 100644
--- a/switch_model/hawaii/reserves.py
+++ b/switch_model/hawaii/reserves.py
@@ -17,21 +17,21 @@ def define_components(m):
 
     # projects that can provide reserves
     # TODO: add batteries, hydrogen and pumped storage to this
-    m.FIRM_GENECTS = Set(
+    m.FIRM_GENS = Set(
         initialize=m.GENERATION_PROJECTS,
         #filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar']
     )
     m.FIRM_GEN_TPS = Set(
         initialize=m.GEN_TPS,
-        filter=lambda m, p, tp: p in m.FIRM_GENECTS
+        filter=lambda m, p, tp: p in m.FIRM_GENS
     )
-    m.CONTINGENCY_GENECTS = Set(
+    m.CONTINGENCY_GENS = Set(
         initialize=m.GENERATION_PROJECTS,
         filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS
     )
     m.CONTINGENCY_GEN_TPS = Set(
         initialize=m.GEN_TPS,
-        filter=lambda m, p, tp: p in m.CONTINGENCY_GENECTS
+        filter=lambda m, p, tp: p in m.CONTINGENCY_GENS
     )
 
    # Calculate spinning reserve requirements.
@@ -69,18 +69,18 @@ def define_components(m): if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS )) +def define_dynamic_components(m): + # these are defined late, so they can check whether various components have been defined by other modules + # TODO: create a central registry for components that contribute to reserves + # Calculate contingency reserve requirements m.ContingencyReserveUpRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) # Apply a simple n-1 contingency reserve requirement; # we treat each project as a separate contingency - # Note: we provide reserves for the full committed amount of the project so that + # Note: we provide reserves for the full committed amount of each unit so that # if any of the capacity is being used for regulating reserves, that will be backed # up by contingency reserves. - # TODO: convert this to a big-m constraint with the following elements: - # binary on/off flag for each g, tp in CONTINGENCY_GEN_TPS - # constraint that ProjDispatch[g, tp] <= binary * gen_max_capacity[g] - # constraint that m.ContingencyReserveUpRequirement[tp] >= binary * m.gen_unit_size[g] - # (but this may make the model too slow to solve!) + # note: this uses a binary run/no-run flag, so it only provides one unit's worth of reserves m.CommitGenFlag = Var(m.CONTINGENCY_GEN_TPS, within=Binary) m.Set_CommitGenFlag = Constraint( m.CONTINGENCY_GEN_TPS, @@ -157,7 +157,7 @@ def define_dynamic_components(m): # from EVs and simple demand response, since it's not clear how high they could go ) - # Meet the reserve requirements + # Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals) m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp: m.SpinningReservesUpAvailable[tp] - m.SpinningReserveUpRequirement[tp] >= 0 ) @@ -189,7 +189,7 @@ def define_dynamic_components(m): # project reporting types are defined in save_custom_results.py # Note: this assumes timepoints are evenly spaced, and timeseries begin at midnight # m.CYCLING_PLANTS_TIMEPOINTS = Set(dimen=2, initialize=lambda m: [ - # (g, tp) for g in m.REPORTING_TYPE_GENECTS['Cycling'] + # (g, tp) for g in m.REPORTING_TYPE_GENS['Cycling'] # for tp in m.TPS_FOR_GEN[g] # ]) # m.Cycle_Plants = Constraint(m.CYCLING_PLANTS_TIMEPOINTS, rule=lambda m, g, tp: diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index e3d7a5be5..2b8f67b01 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -22,6 +22,7 @@ import os import switch_model.hawaii.util as util import switch_model.financials as financials +from collections import defaultdict from pyomo.environ import * def define_components(m): @@ -46,6 +47,16 @@ def summary_values(m): demand_components = [c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs') if hasattr(m, c)] values = [] + # Cache SystemCostPerPeriod and SystemCost to speed up saving large models + # The time needed to directly access the expressions seems to rise quadratically + # with the number of timepoints, so it gets very slow for big models and we don't + # want to repeat it if possible (e.g., without caching, this function takes up + # to an hour for an 8760 Oahu model) + SystemCostPerPeriod = dict() + for p in m.PERIODS: + SystemCostPerPeriod[p] = value(m.SystemCostPerPeriod[p]) + SystemCost = sum(SystemCostPerPeriod[p] for p in m.PERIODS) + # scenario name and looping variables values.extend([ str(m.options.scenario_name), @@ -53,23 +64,23 @@ def 
summary_values(m): ]) # total cost (all periods) - values.append(m.SystemCost) + values.append(SystemCost) # m.SystemCost) # NPV of total cost / NPV of kWh generated (equivalent to spreading # all costs uniformly over all generation) values.append( - m.SystemCost + SystemCost # m.SystemCost / sum( m.bring_timepoint_costs_to_base_year[t] * 1000.0 * sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) - for t in m.TIMEPOINTS + for t in m.TIMEPOINTS ) ) # total cost / kWh generated in each period # (both discounted to today, so the discounting cancels out) values.extend([ - m.SystemCostPerPeriod[p] + SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] / sum( m.bring_timepoint_costs_to_base_year[t] * 1000.0 * sum(getattr(m, c)[z, t] for c in demand_components for z in m.LOAD_ZONES) @@ -196,12 +207,21 @@ def gen_energy_source(g): built_gens = tuple(sorted(set( g for pe in m.PERIODS for g in m.GENERATION_PROJECTS if value(m.GenCapacity[g, pe]) > 0.001 ))) - operate_gen_in_period = tuple(set( - (g, m.tp_period[tp]) - for g, tp in m.GEN_TPS if value(m.DispatchGen[g, tp]) > 0.001 - )) - built_tech = tuple(set(m.gen_tech[g] for g in built_gens)) - built_energy_source = tuple(set(gen_energy_source(g) for g in built_gens)) + active_periods_for_gen = defaultdict(set) + for (g, tp) in m.GEN_TPS: + if value(m.DispatchGen[g, tp]) > 0.001: + active_periods_for_gen[g].add(m.tp_period[tp]) + # add the periods between the first and last active period if capacity was available then + operate_gen_in_period = set() + for g, active_periods in active_periods_for_gen.items(): + start = min(active_periods) + end = max(active_periods) + for p in m.PERIODS: + if start <= p <= end and value(m.GenCapacity[g, p]) > 0: + operate_gen_in_period.add((g, p)) + + built_tech = tuple(sorted(set(m.gen_tech[g] for g in built_gens))) + built_energy_source = tuple(sorted(set(gen_energy_source(g) for g in built_gens))) battery_capacity_mw = lambda m, z, pe: ( (m.Battery_Capacity[z, pe] * m.battery_max_discharge / m.battery_min_discharge_time) @@ -241,129 +261,142 @@ def gen_energy_source(g): ) ) - def cost_breakdown_details(m, z, pe): - values = [z, pe] - # capacity built, conventional plants - - values += [ + util.write_table(m, m.LOAD_ZONES, m.PERIODS, + output_file=os.path.join(outputs_dir, "production_by_technology{t}.tsv".format(t=tag)), + headings=("load_zone", "period") + built_tech, + values=lambda m, z, pe: (z, pe,) + tuple( sum( - m.BuildGen[g, pe] - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z and (g, pe) in m.BuildGen + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] ) for t in built_tech - ] - # capacity built, batteries, MW and MWh - if hasattr(m, "BuildBattery"): - values.extend([ - m.BuildBattery[z, pe]/m.battery_min_discharge_time, - m.BuildBattery[z, pe] - ]) - else: - values.extend([0.0, 0.0]) - # capacity built, hydro - values.append( - sum( - m.BuildPumpedHydroMW[g, pe] - for g in m.PH_GENS if m.ph_load_zone[g]==z - ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, - ) - # capacity built, hydrogen - if hasattr(m, "BuildElectrolyzerMW"): - values.extend([ - m.BuildElectrolyzerMW[z, pe], - m.BuildLiquifierKgPerHour[z, pe], - m.BuildLiquidHydrogenTankKg[z, pe], - m.BuildFuelCellMW[z, pe] - ]) - else: - values.extend([0.0, 0.0, 0.0, 0.0]) - - # number of EVs and conventional vehicles - if hasattr(m, 'ev_share'): - 
values.append(m.n_all_vehicles[z, pe] * m.ev_share[z, pe]) - values.append(m.n_all_vehicles[z, pe] * (1.0 - m.ev_share[z, pe])) - # import pdb; pdb.set_trace() - - # capital investments - # regular projects - values += [ - sum( - m.BuildGen[g, pe] * (m.gen_overnight_cost[g, pe] + m.gen_connect_cost_per_mw[g]) - for g in built_gens - if m.gen_tech[g] == t and m.gen_load_zone[g] == z \ - and (g, pe) in m.GEN_BLD_YRS - ) - for t in built_tech - ] - # batteries - if hasattr(m, 'battery_capital_cost_per_mwh_capacity'): - # models with single capital cost (defunct) - values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity) - elif hasattr(m, 'battery_capital_cost_per_mwh_capacity_by_year'): - values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity_by_year[pe]) - else: - values.append(0.0) - # hydro - values.append( - sum( - m.BuildPumpedHydroMW[g, pe] * m.ph_capital_cost_per_mw[g] - for g in m.PH_GENS if m.ph_load_zone[g]==z - ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, - ) - # hydrogen - if hasattr(m, "BuildElectrolyzerMW"): - values.extend([ - m.BuildElectrolyzerMW[z, pe] * m.hydrogen_electrolyzer_capital_cost_per_mw, - m.BuildLiquifierKgPerHour[z, pe] * m.hydrogen_liquifier_capital_cost_per_kg_per_hour, - m.BuildLiquidHydrogenTankKg[z, pe] * m.liquid_hydrogen_tank_capital_cost_per_kg, - m.BuildFuelCellMW[z, pe] * m.hydrogen_fuel_cell_capital_cost_per_mw - ]) - else: - values.extend([0.0, 0.0, 0.0, 0.0]) - - # _annual_ fuel expenditures - if hasattr(m, "REGIONAL_FUEL_MARKETS"): - values.extend([ - sum(m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st] for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, pe]) - for rfm in m.REGIONAL_FUEL_MARKETS - ]) - # costs to expand fuel markets (this could later be disaggregated by market and tier) - if hasattr(m, "RFM_Fixed_Costs_Annual"): - values.append(m.RFM_Fixed_Costs_Annual[pe]) - # TODO: add similar code for fuel_costs module instead of fuel_markets module - - # total cost per period - values.append(annualize_present_value_period_cost(m, pe, m.SystemCostPerPeriod[pe])) - - # total cost per year for transport - if hasattr(m, "ev_extra_annual_cost"): - values.append(m.ev_extra_annual_cost[pe]) - values.append(m.ice_annual_fuel_cost[pe]) - - return values - - util.write_table(m, m.LOAD_ZONES, m.PERIODS, - output_file=os.path.join(outputs_dir, "cost_breakdown{t}.tsv".format(t=tag)), - headings=("load_zone", "period") + tuple(t+"_mw_added" for t in built_tech) - + ("batteries_mw_added", "batteries_mwh_added", "hydro_mw_added") - + ( "h2_electrolyzer_mw_added", "h2_liquifier_kg_per_hour_added", - "liquid_h2_tank_kg_added", "fuel_cell_mw_added") - + (('ev_count', 'ice_count') if hasattr(m, 'ev_share') else ()) - + tuple(t+"_overnight_cost" for t in built_tech) - + ("batteries_overnight_cost", "hydro_overnight_cost") - + ( "h2_electrolyzer_overnight_cost", "h2_liquifier_overnight_cost", - "liquid_h2_tank_overnight_cost", "fuel_cell_overnight_cost") - + (tuple(rfm+"_annual_cost" for rfm in m.REGIONAL_FUEL_MARKETS) - if hasattr(m, "REGIONAL_FUEL_MARKETS") else ()) - + (("fuel_market_expansion_annual_cost",) - if hasattr(m, "RFM_Fixed_Costs_Annual") else ()) - + ('total_electricity_cost',) - + (('ev_extra_capital_recovery',) - if hasattr(m, 'ev_extra_annual_cost') else ()) - + (('ice_annual_fuel_cost',) if hasattr(m, 'ice_annual_fuel_cost') else ()), - values=cost_breakdown_details + ) # TODO: add hydro and hydrogen ) + + # def cost_breakdown_details(m, z, pe): + # values = [z, pe] + # # capacity built, 
conventional plants + # + # values += [ + # sum( + # m.BuildGen[g, pe] + # for g in built_gens + # if m.gen_tech[g] == t and m.gen_load_zone[g] == z and (g, pe) in m.BuildGen + # ) + # for t in built_tech + # ] + # # capacity built, batteries, MW and MWh + # if hasattr(m, "BuildBattery"): + # values.extend([ + # m.BuildBattery[z, pe]/m.battery_min_discharge_time, + # m.BuildBattery[z, pe] + # ]) + # else: + # values.extend([0.0, 0.0]) + # # capacity built, hydro + # values.append( + # sum( + # m.BuildPumpedHydroMW[g, pe] + # for g in m.PH_GENS if m.ph_load_zone[g]==z + # ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, + # ) + # # capacity built, hydrogen + # if hasattr(m, "BuildElectrolyzerMW"): + # values.extend([ + # m.BuildElectrolyzerMW[z, pe], + # m.BuildLiquifierKgPerHour[z, pe], + # m.BuildLiquidHydrogenTankKg[z, pe], + # m.BuildFuelCellMW[z, pe] + # ]) + # else: + # values.extend([0.0, 0.0, 0.0, 0.0]) + # + # # number of EVs and conventional vehicles + # if hasattr(m, 'ev_share'): + # values.append(m.n_all_vehicles[z, pe] * m.ev_share[z, pe]) + # values.append(m.n_all_vehicles[z, pe] * (1.0 - m.ev_share[z, pe])) + # # import pdb; pdb.set_trace() + # + # # capital investments + # # regular projects + # values += [ + # sum( + # m.BuildGen[g, pe] * (m.gen_overnight_cost[g, pe] + m.gen_connect_cost_per_mw[g]) + # for g in built_gens + # if m.gen_tech[g] == t and m.gen_load_zone[g] == z \ + # and (g, pe) in m.GEN_BLD_YRS + # ) + # for t in built_tech + # ] + # # batteries + # if hasattr(m, 'battery_capital_cost_per_mwh_capacity'): + # # models with single capital cost (defunct) + # values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity) + # elif hasattr(m, 'battery_capital_cost_per_mwh_capacity_by_year'): + # values.append(m.BuildBattery[z, pe] * m.battery_capital_cost_per_mwh_capacity_by_year[pe]) + # else: + # values.append(0.0) + # # hydro + # values.append( + # sum( + # m.BuildPumpedHydroMW[g, pe] * m.ph_capital_cost_per_mw[g] + # for g in m.PH_GENS if m.ph_load_zone[g]==z + # ) if hasattr(m, "BuildPumpedHydroMW") else 0.0, + # ) + # # hydrogen + # if hasattr(m, "BuildElectrolyzerMW"): + # values.extend([ + # m.BuildElectrolyzerMW[z, pe] * m.hydrogen_electrolyzer_capital_cost_per_mw, + # m.BuildLiquifierKgPerHour[z, pe] * m.hydrogen_liquifier_capital_cost_per_kg_per_hour, + # m.BuildLiquidHydrogenTankKg[z, pe] * m.liquid_hydrogen_tank_capital_cost_per_kg, + # m.BuildFuelCellMW[z, pe] * m.hydrogen_fuel_cell_capital_cost_per_mw + # ]) + # else: + # values.extend([0.0, 0.0, 0.0, 0.0]) + # + # # _annual_ fuel expenditures + # if hasattr(m, "REGIONAL_FUEL_MARKETS"): + # values.extend([ + # sum(m.ConsumeFuelTier[rfm_st] * m.rfm_supply_tier_cost[rfm_st] for rfm_st in m.SUPPLY_TIERS_FOR_RFM_PERIOD[rfm, pe]) + # for rfm in m.REGIONAL_FUEL_MARKETS + # ]) + # # costs to expand fuel markets (this could later be disaggregated by market and tier) + # if hasattr(m, "RFM_Fixed_Costs_Annual"): + # values.append(m.RFM_Fixed_Costs_Annual[pe]) + # # TODO: add similar code for fuel_costs module instead of fuel_markets module + # + # # total cost per period + # values.append(annualize_present_value_period_cost(m, pe, m.SystemCostPerPeriod[pe])) + # + # # total cost per year for transport + # if hasattr(m, "ev_extra_annual_cost"): + # values.append(m.ev_extra_annual_cost[pe]) + # values.append(m.ice_annual_fuel_cost[pe]) + # + # return values + # + # util.write_table(m, m.LOAD_ZONES, m.PERIODS, + # output_file=os.path.join(outputs_dir, "cost_breakdown{t}.tsv".format(t=tag)), + # 
headings=("load_zone", "period") + tuple(t+"_mw_added" for t in built_tech) + # + ("batteries_mw_added", "batteries_mwh_added", "hydro_mw_added") + # + ( "h2_electrolyzer_mw_added", "h2_liquifier_kg_per_hour_added", + # "liquid_h2_tank_kg_added", "fuel_cell_mw_added") + # + (('ev_count', 'ice_count') if hasattr(m, 'ev_share') else ()) + # + tuple(t+"_overnight_cost" for t in built_tech) + # + ("batteries_overnight_cost", "hydro_overnight_cost") + # + ( "h2_electrolyzer_overnight_cost", "h2_liquifier_overnight_cost", + # "liquid_h2_tank_overnight_cost", "fuel_cell_overnight_cost") + # + (tuple(rfm+"_annual_cost" for rfm in m.REGIONAL_FUEL_MARKETS) + # if hasattr(m, "REGIONAL_FUEL_MARKETS") else ()) + # + (("fuel_market_expansion_annual_cost",) + # if hasattr(m, "RFM_Fixed_Costs_Annual") else ()) + # + ('total_electricity_cost',) + # + (('ev_extra_capital_recovery',) + # if hasattr(m, 'ev_extra_annual_cost') else ()) + # + (('ice_annual_fuel_cost',) if hasattr(m, 'ice_annual_fuel_cost') else ()), + # values=cost_breakdown_details + # ) # util.write_table(m, m.PERIODS, # output_file=os.path.join(outputs_dir, "capacity{t}.tsv".format(t=t)), diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index cd9362aa0..435d240fe 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -1,3 +1,29 @@ +# TODO: make this get data from the redr server via an HTTP api instead of psycopg2, as follows: + +# create a .rpy script on the redr server that can accept form data (the args dict) via POST +# and then return a .zip file containing all the files created by write_tables (most of the +# code in this module would go into that script). This can create the files as text blobs and +# then collect them into a single .zip file using the zip module +# Info on zipping multiple files together in memory: https://stackoverflow.com/a/25194850/3830997 +# See here for info on .rpy files: +# https://twistedmatrix.com/documents/15.0.0/web/howto/using-twistedweb.html#web-howto-using-twistedweb-rpys +# See here for info on receiving POST requests: +# https://www.saltycrane.com/blog/2010/08/twisted-web-post-example-json/ + +# client side will then just send a POST request with the args dictionary (probably using the +# requests module), receive back a zip file with all the relevant CSVs (probably with a whole +# relative directory structure). Client may also need to convert line endings (or unzip may do +# it automatically). 
+# See here for info on sending a Python dict as the body in a +# POST request: https://stackoverflow.com/a/14804320/3830997 +# https://stackoverflow.com/questions/15694120/why-does-http-post-request-body-need-to-be-json-enconded-in-python +# https://stackoverflow.com/questions/35212279/python-request-post-not-accepting-dictionary +# (the latter two are interesting edge cases but may be doing it wrong) +# Unzipping files in Python: https://stackoverflow.com/questions/3451111/unzipping-files-in-python +# some random info on converting line endings with Python zip/unzip: +# https://bytes.com/topic/python/answers/692751-module-zipfile-writestr-line-endings-issue +# https://stackoverflow.com/questions/2613800/how-to-convert-dos-windows-newline-crlf-to-unix-newline-n-in-a-bash-script + import time, sys, collections, os from textwrap import dedent from switch_model import __version__ as switch_version @@ -453,7 +479,7 @@ def write_tables(**args): ORDER BY 1 ) SELECT - "GENERATION_PROJECT" as project, + "GENERATION_PROJECT", power_start_mw, power_end_mw, incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h FROM curves c JOIN study_projects p using (technology) diff --git a/switch_model/hawaii/unserved_load.py b/switch_model/hawaii/unserved_load.py index 40e55119e..48ac5ea94 100644 --- a/switch_model/hawaii/unserved_load.py +++ b/switch_model/hawaii/unserved_load.py @@ -24,7 +24,8 @@ def define_components(m): m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) # total cost for unserved load m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) + m.tp_duration_hrs[tp] + * sum(m.UnservedLoad[z, tp] * m.unserved_load_penalty_per_mwh for z in m.LOAD_ZONES) ) # add the unserved load to the model's energy balance m.Zone_Power_Injections.append('UnservedLoad') From ffc98add58903a4ecb38c916823b78929b2691fc Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 3 Jan 2018 14:50:36 -1000 Subject: [PATCH 08/51] streamline index unpacking code --- switch_model/reporting/__init__.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 0aa9a84b5..4ea4bf209 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -85,22 +85,19 @@ def format_row(row): print "these tuples automatically. Please update your code to work with unpacked index values." print "Problem occured with {}.".format(values.func_code) -def unpack_elements(it): - """Unpack any multi-element objects within it, to make a single flat list. +def unpack_elements(items): + """Unpack any multi-element objects within items, to make a single flat list. Note: this is not recursive. 
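    For example (an illustration added here, not a doctest from the source):
    unpack_elements(['a', ('b', 'c'), 1]) returns ['a', 'b', 'c', 1].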
This is used to flatten the product of a multi-dimensional index with anything else.""" l=[] - for t in it: - if isinstance(t, basestring): - l.append(t) + for x in items: + if isinstance(x, basestring): + l.append(x) else: try: - # check if it's iterable - iterator = iter(t) - for i in iterator: - l.append(i) - except TypeError: - l.append(t) + l.extend(x) + except TypeError: # x isn't iterable + l.append(x) return l From 45803a1d9442c48c33aa96430097a63868f04367 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 3 Jan 2018 23:29:23 -1000 Subject: [PATCH 09/51] interim version of Hawaii reserves for case study for paper --- switch_model/hawaii/demand_response_simple.py | 33 +++-- switch_model/hawaii/reserves.py | 127 ++++++++++-------- switch_model/hawaii/unserved_load.py | 14 ++ 3 files changed, 110 insertions(+), 64 deletions(-) diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py index 063182e86..a86af8e67 100644 --- a/switch_model/hawaii/demand_response_simple.py +++ b/switch_model/hawaii/demand_response_simple.py @@ -16,18 +16,10 @@ def define_components(m): m.ShiftDemand = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, bounds=lambda m, z, t: ( (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], - None + # assume all shiftable load can be concentrated into 3 hours (no less) + None # 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, t] ) ) - # Register with spinning reserves if it is available - if 'Spinning_Reserve_Up_Provisions' in dir(m): - m.HIDemandResponseSimpleSpinningReserveUp = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.DemandResponse[z, t] - m.DemandResponse[z, t].lb - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Up_Provisions.append('HIDemandResponseSimpleSpinningReserveUp') # all changes to demand must balance out over the course of the day m.Demand_Response_Net_Zero = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: @@ -36,3 +28,24 @@ def define_components(m): # add demand response to the zonal energy balance m.Zone_Power_Withdrawals.append('ShiftDemand') + + # # calculate up and down reserves (from supply perspective, so "up" means less load) + # # note: the bids are negative quantities, indicating _production_ of reserves; + # # they contribute to the reserve requirement with opposite sign + # m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + # rule=lambda m, z, tp: m.DemandResponse[z, t] - m.DemandResponse[z, t].lb + # ) + # m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + # # assume loads could quickly be + # rule=lambda m, z, tp: m.DemandResponse[z, t].ub - m.DemandResponse[z, t] + # ) + + # Register with spinning reserves if it is available + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + m.HIDemandResponseSimpleSpinningReserveUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: + sum(m.DemandResponse[z, t] - m.DemandResponse[z, t].lb + for z in m.ZONES_IN_BALANCING_AREA[b]) + ) + m.Spinning_Reserve_Up_Provisions.append('HIDemandResponseSimpleSpinningReserveUp') diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py index 930e0917a..9b915f150 100644 --- a/switch_model/hawaii/reserves.py +++ b/switch_model/hawaii/reserves.py @@ -5,6 +5,19 @@ import os from pyomo.environ import * +# TODO: use standard reserves module for this + +def define_arguments(argparser): + argparser.add_argument('--reserves-from-storage', action='store_true', default=True, + help="Allow 
storage (batteries and hydrogen) to provide up- and down-reserves.") + argparser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage', + action='store_false', + help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.") + argparser.add_argument('--reserves-from-demand-response', action='store_true', default=True, + help="Allow demand response to provide up- and down-reserves.") + argparser.add_argument('--no-reserves-from-demand-response', dest='reserves_from_demand_response', + action='store_false', + help="Don't allow demand response to provide up- and down-reserves.") def define_components(m): """ @@ -93,69 +106,75 @@ def define_dynamic_components(m): # m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp] m.ContingencyReserveUpRequirement[tp] >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g] ) + + m.ContingencyReserveDownRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals) + # For now, we provide down reserves equal to 10% of all loads, including + # baseline load, demand response adjustment, electric vehicles, battery charging + # and hydrogen. It would be possible to split these into centralized and distributed + # loads and allocate separately for them (e.g., contingency reserves exceed + # 10% of total decentralized load and the size of the contingency for each + # centralized load; however, it's not obvious how to set the contingency for + # centralized loads, which are modular and may be divided between several locations. + # So we just assume we could lose 10% of all loads of any type, at any time.) + m.ContingencyReserveDownRequirement_Calculate = Constraint( + m.TIMEPOINTS, + rule=lambda m, tp: + m.ContingencyReserveDownRequirement[tp] >= + 0.1 * sum(getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES) + ) - # Calculate total spinning reserve requirement + # Calculate total spinning reserve requirements m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: m.regulating_reserve_requirement_mw[tp] + m.ContingencyReserveUpRequirement[tp] ) - # require 10% down reserves at all times m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - 0.10 * sum(m.zone_demand_mw[z, tp] for z in m.LOAD_ZONES) + m.ContingencyReserveDownRequirement[tp] ) -def define_dynamic_components(m): - # these are defined late, so they can check whether various components have been defined by other modules - # TODO: create a central registry for components that contribute to reserves # Available reserves - m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DispatchSlackUp[p, tp] for p in m.FIRM_GENECTS if (p, tp) in m.GEN_TPS) - + ( - sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'BatterySlackDown') - else 0.0 - ) - + ( - sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'HydrogenSlackUp') - else 0.0 - ) - + ( - sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'DemandUpReserves') - else 0.0 - ) - + ( - sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES) - if hasattr(m, 'ShiftDemand') - else 0.0 - ) - + ( - sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES) - if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal' - else 0.0 - ) - ) - m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=lambda m, tp: - sum(m.DispatchSlackDown[p, tp] for p in m.FIRM_GENECTS if (p, tp) in m.GEN_TPS) - + ( - sum(m.BatterySlackDown[z, tp] for z in 
m.LOAD_ZONES)
-            if hasattr(m, 'BatterySlackDown')
-            else 0.0
-        )
-        + (
-            sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES)
-            if hasattr(m, 'HydrogenSlackDown')
-            else 0.0
-        )
-        + (
-            sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES)
-            if hasattr(m, 'DemandDownReserves')
-            else 0.0
-        )
-        # note: we currently ignore down-reserves (option of increasing consumption)
-        # from EVs and simple demand response, since it's not clear how high they could go
-    )
+    def expr(m, tp):
+        avail = sum(m.DispatchSlackDown[p, tp] for p in m.FIRM_GENS if (p, tp) in m.GEN_TPS)
+        if m.options.reserves_from_storage:
+            if hasattr(m, 'BatterySlackDown'):
+                avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES)
+            if hasattr(m, 'HydrogenSlackDown'):
+                avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES)
+        if m.options.reserves_from_demand_response:
+            if hasattr(m, 'DemandDownReserves'):
+                avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES)
+            if hasattr(m, 'ShiftDemand'):
+                # avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES)
+                ub = 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp]
+                avail += sum(ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES)
+        # note: we currently ignore down-reserves (option of increasing consumption)
+        # from EVs since it's not clear how high they could go; we could revisit this if
+        # down-reserves have a positive price at equilibrium (probably won't)
+        if hasattr(m, 'UnservedDownReserves'):
+            avail += m.UnservedDownReserves[tp]
+        return avail
+    m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=expr)
 
     # Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals)
     m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp:
diff --git a/switch_model/hawaii/unserved_load.py b/switch_model/hawaii/unserved_load.py
index 48ac5ea94..1a6737d12 100644
--- a/switch_model/hawaii/unserved_load.py
+++ b/switch_model/hawaii/unserved_load.py
@@ -31,3 +31,17 @@ def define_components(m):
     m.Zone_Power_Injections.append('UnservedLoad')
     # add the unserved load penalty to the model's objective function
     m.Cost_Components_Per_TP.append('UnservedLoadPenalty')
+
+    # amount of unserved reserves during each timepoint
+    m.UnservedUpReserves = Var(m.TIMEPOINTS, within=NonNegativeReals)
+    m.UnservedDownReserves = Var(m.TIMEPOINTS, within=NonNegativeReals)
+    # total cost for unserved reserves (90% as high as cost of unserved load,
+    # to make the model prefer to serve load when possible)
+    m.UnservedReservePenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
+        m.tp_duration_hrs[tp]
+        * 0.9
+        * m.unserved_load_penalty_per_mwh
+        * (m.UnservedUpReserves[tp] + m.UnservedDownReserves[tp])
+    )
+    # add the unserved reserve penalty to the model's objective function
+    m.Cost_Components_Per_TP.append('UnservedReservePenalty')

From bc831e072b459ca84c04e23794f25383b3a178e6 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 10 Jan 2018 13:37:50 -1000
Subject: [PATCH 10/51] move advanced demand response from hawaii package to new demand_response.iterative package

---
 .../demand_response/iterative/__init__.py}               | 0
 .../iterative}/constant_elasticity_demand_system.py      | 0
 .../demand_response/iterative}/r_demand_system.py        | 0
 3 files changed, 0 insertions(+), 0 deletions(-)
 rename switch_model/{hawaii/demand_response.py => balancing/demand_response/iterative/__init__.py} (100%)
 rename switch_model/{hawaii => balancing/demand_response/iterative}/constant_elasticity_demand_system.py (100%)
 rename switch_model/{hawaii => balancing/demand_response/iterative}/r_demand_system.py (100%)

diff --git a/switch_model/hawaii/demand_response.py b/switch_model/balancing/demand_response/iterative/__init__.py
similarity index 100%
rename from switch_model/hawaii/demand_response.py
rename to switch_model/balancing/demand_response/iterative/__init__.py
diff --git a/switch_model/hawaii/constant_elasticity_demand_system.py b/switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py
similarity index 100%
rename from switch_model/hawaii/constant_elasticity_demand_system.py
rename to switch_model/balancing/demand_response/iterative/constant_elasticity_demand_system.py
diff --git a/switch_model/hawaii/r_demand_system.py b/switch_model/balancing/demand_response/iterative/r_demand_system.py
similarity index 100%
rename from switch_model/hawaii/r_demand_system.py
rename to switch_model/balancing/demand_response/iterative/r_demand_system.py

From 19b13c5ecba30cf2a11ef7f5d3babc7fe174c48f Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Wed, 10 Jan 2018 13:45:05 -1000
Subject: [PATCH 11/51] allow specification of fixed energy-power ratios for storage, possibly 0 (for reserve batteries)

---
 switch_model/generators/extensions/storage.py | 78 ++++++++++++++++---
 switch_model/hawaii/reserves.py               | 45 +++++++++--
 switch_model/hawaii/scenario_data.py          | 52 +++++++------
 3 files changed, 135 insertions(+), 40 deletions(-)

diff --git a/switch_model/generators/extensions/storage.py b/switch_model/generators/extensions/storage.py
index 6d319461b..62ab451a9 100644
--- a/switch_model/generators/extensions/storage.py
+++ b/switch_model/generators/extensions/storage.py
@@ -38,9 +38,21 @@ def define_components(mod):
     gen_store_to_release_ratio[STORAGE_GENS] describes the maximum rate that
     energy can be stored, expressed as a ratio of discharge power capacity.
     This is an optional parameter and will default to 1. If a
-    storage project has 1 MW of dischage capacity and a max_store_rate
+    storage project has 1 MW of discharge capacity and a gen_store_to_release_ratio
     of 1.2, then it can consume up to 1.2 MW of power while charging.
 
+    gen_storage_energy_to_power_ratio[STORAGE_GENS], if specified, restricts
+    the storage capacity (in MWh) to be a fixed multiple of the output
+    power (in MW), i.e., specifies a particular number of hours of
+    storage capacity. Omit this column or specify "." to allow Switch
+    to choose the energy/power ratio. 
(Note: gen_storage_energy_overnight_cost + or gen_overnight_cost should often be set to 0 when using this.) + + gen_storage_max_cycles_per_year[STORAGE_GENS], if specified, restricts + the number of charge/discharge cycles each storage project can perform + per year; one cycle is defined as discharging an amount of energy + equal to the storage capacity of the project. + gen_storage_energy_overnight_cost[(g, bld_yr) in STORAGE_GEN_BLD_YRS] is the overnight capital cost per MWh of energy capacity for building the given storage technology installed in the @@ -86,14 +98,28 @@ def define_components(mod): """ mod.STORAGE_GENS = Set(within=mod.GENERATION_PROJECTS) + mod.STORAGE_GEN_PERIODS = Set( + within=mod.GEN_PERIODS, + initialize=lambda m: [(g, p) for g in m.STORAGE_GENS for p in m.PERIODS_FOR_GEN[g]] + ) mod.gen_storage_efficiency = Param( mod.STORAGE_GENS, within=PercentFraction) + # TODO: rename to gen_charge_to_discharge_ratio? mod.gen_store_to_release_ratio = Param( mod.STORAGE_GENS, within=PositiveReals, default=1.0) + mod.gen_storage_energy_to_power_ratio = Param( + mod.STORAGE_GENS, + within=NonNegativeReals, + default=float("inf")) # inf is a flag that no value is specified (nan and None don't work) + mod.gen_storage_max_cycles_per_year = Param( + mod.STORAGE_GENS, + within=NonNegativeReals, + default=float('inf')) + # TODO: build this set up instead of filtering down, to improve performance mod.STORAGE_GEN_BLD_YRS = Set( dimen=2, initialize=mod.GEN_BLD_YRS, @@ -134,7 +160,9 @@ def define_components(mod): within=NonNegativeReals) # Summarize storage charging for the energy balance equations - def StorageNetCharge_rule(m, z, t): + # TODO: rename this StorageTotalCharging or similar (to indicate it's a + # sum for a zone, not a net quantity for a project) + def rule(m, z, t): # Construct and cache a set for summation as needed if not hasattr(m, 'Storage_Charge_Summation_dict'): m.Storage_Charge_Summation_dict = collections.defaultdict(set) @@ -142,15 +170,22 @@ def StorageNetCharge_rule(m, z, t): z2 = m.gen_load_zone[g] m.Storage_Charge_Summation_dict[z2, t2].add(g) # Use pop to free memory - relevant_projects = m.Storage_Charge_Summation_dict.pop((z, t)) + relevant_projects = m.Storage_Charge_Summation_dict.pop((z, t), {}) return sum(m.ChargeStorage[g, t] for g in relevant_projects) - mod.StorageNetCharge = Expression( - mod.LOAD_ZONES, mod.TIMEPOINTS, - rule=StorageNetCharge_rule) + mod.StorageNetCharge = Expression(mod.LOAD_ZONES, mod.TIMEPOINTS, rule=rule) # Register net charging with zonal energy balance. Discharging is already # covered by DispatchGen. 
mod.Zone_Power_Withdrawals.append('StorageNetCharge') + # use fixed energy/power ratio (# hours of capacity) when specified + mod.Enforce_Fixed_Energy_Storage_Ratio = Constraint( + mod.STORAGE_GEN_BLD_YRS, + rule=lambda m, g, y: + Constraint.Skip if m.gen_storage_energy_to_power_ratio[g] == float("inf") # no value specified + else + (m.BuildStorageEnergy[g, y] == m.gen_storage_energy_to_power_ratio[g] * m.BuildGen[g, y]) + ) + def Charge_Storage_Upper_Limit_rule(m, g, t): return m.ChargeStorage[g,t] <= \ m.DispatchUpperLimit[g, t] * m.gen_store_to_release_ratio[g] @@ -178,6 +213,19 @@ def State_Of_Charge_Upper_Limit_rule(m, g, t): mod.STORAGE_GEN_TPS, rule=State_Of_Charge_Upper_Limit_rule) + # batteries can only complete the specified number of cycles per year, averaged over each period + mod.Battery_Cycle_Limit = Constraint( + mod.STORAGE_GEN_PERIODS, + rule=lambda m, g, p: + # solvers sometimes perform badly with infinite constraint + Constraint.Skip if m.gen_storage_max_cycles_per_year[g] == float('inf') + else ( + sum(m.DispatchGen[g, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) + <= + m.gen_storage_max_cycles_per_year[g] * m.StorageEnergyCapacity[g, p] * m.period_length_years[p] + ) + ) + def load_inputs(mod, switch_data, inputs_dir): """ @@ -186,7 +234,8 @@ def load_inputs(mod, switch_data, inputs_dir): generation_projects_info.tab GENERATION_PROJECT, ... - gen_storage_efficiency, gen_store_to_release_ratio* + gen_storage_efficiency, gen_store_to_release_ratio*, + gen_storage_energy_to_power_ratio*, gen_storage_max_cycles_per_year* gen_build_costs.tab GENERATION_PROJECT, build_year, ... @@ -194,12 +243,19 @@ def load_inputs(mod, switch_data, inputs_dir): """ + # TODO: maybe move these columns to a storage_gen_info file to avoid the weird index + # reading and avoid having to create these extra columns for all projects; + # Alternatively, say that these values are specified for _all_ projects (maybe with None + # as default) and then define STORAGE_GENS as the subset of projects for which + # gen_storage_efficiency has been specified, then require valid settings for all + # STORAGE_GENS. switch_data.load_aug( filename=os.path.join(inputs_dir, 'generation_projects_info.tab'), auto_select=True, - optional_params=['gen_store_to_release_ratio'], - param=(mod.gen_storage_efficiency, mod.gen_store_to_release_ratio)) + optional_params=['gen_store_to_release_ratio', 'gen_storage_energy_to_power_ratio', 'gen_storage_max_cycles_per_year'], + param=(mod.gen_storage_efficiency, mod.gen_store_to_release_ratio, mod.gen_storage_energy_to_power_ratio, mod.gen_storage_max_cycles_per_year)) # Base the set of storage projects on storage efficiency being specified. 
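+    # (For example, under this convention a hypothetical project listed in
+    # generation_projects_info.tab with gen_storage_efficiency=0.85,
+    # gen_storage_energy_to_power_ratio=4 and gen_storage_max_cycles_per_year=365
+    # -- a 4-hour battery cycled at most daily -- is treated as a storage
+    # project simply because its efficiency column is filled in.)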
+ # TODO: define this in a more normal way switch_data.data()['STORAGE_GENS'] = { None: switch_data.data(name='gen_storage_efficiency').keys()} switch_data.load_aug( @@ -217,7 +273,7 @@ def post_solve(instance, outdir): reporting.write_table( instance, instance.STORAGE_GEN_BLD_YRS, output_file=os.path.join(outdir, "storage_builds.txt"), - headings=("project", "period", "load_zone", + headings=("generation_project", "period", "load_zone", "IncrementalPowerCapacityMW", "IncrementalEnergyCapacityMWh", "OnlinePowerCapacityMW", "OnlineEnergyCapacityMWh" ), values=lambda m, g, bld_yr: ( @@ -228,7 +284,7 @@ def post_solve(instance, outdir): reporting.write_table( instance, instance.STORAGE_GEN_TPS, output_file=os.path.join(outdir, "storage_dispatch.txt"), - headings=("project", "timepoint", "load_zone", + headings=("generation_project", "timepoint", "load_zone", "ChargeMW", "DischargeMW", "StateOfCharge"), values=lambda m, g, t: ( g, m.tp_timestamp[t], m.gen_load_zone[g], diff --git a/switch_model/hawaii/reserves.py b/switch_model/hawaii/reserves.py index 9b915f150..d69ecf439 100644 --- a/switch_model/hawaii/reserves.py +++ b/switch_model/hawaii/reserves.py @@ -72,7 +72,7 @@ def define_components(m): # more conservative values (found by giving 10x weight to times when we provide less reserves than GE): # [1., 1., 1., 0.25760558, 0.18027923, 0.49123101] - m.regulating_reserve_requirement_mw = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( + m.RegulatingReserveRequirementMW = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum( m.GenCapacity[g, m.tp_period[tp]] * min( m.regulating_reserve_fraction[m.gen_tech[g]] * m.gen_max_capacity_factor[g, tp], @@ -125,7 +125,7 @@ def define_dynamic_components(m): # Calculate total spinning reserve requirements m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: - m.regulating_reserve_requirement_mw[tp] + m.ContingencyReserveUpRequirement[tp] + m.RegulatingReserveRequirementMW[tp] + m.ContingencyReserveUpRequirement[tp] ) m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp: m.ContingencyReserveDownRequirement[tp] @@ -134,12 +134,25 @@ def define_dynamic_components(m): # Available reserves def expr(m, tp): - avail = sum(m.DispatchSlackUp[p, tp] for p in m.FIRM_GENS if (p, tp) in m.GEN_TPS) + STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + # all regular generators; omit storage because they'll be added separately if needed + avail = sum( + m.DispatchSlackUp[g, tp] + for g in m.FIRM_GENS + if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS + ) if m.options.reserves_from_storage: + # hawaii battery and hydrogen modules if hasattr(m, 'BatterySlackUp'): avail += sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES) if hasattr(m, 'HydrogenSlackUp'): avail += sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES) + # standard storage module (can stop charging and raise output to max) + avail += sum( + m.DispatchSlackUp[g, tp] + m.ChargeStorage[g, tp] + for g in STORAGE_GENS + if (g, tp) in m.GEN_TPS + ) if m.options.reserves_from_demand_response: if hasattr(m, 'DemandUpReserves'): avail += sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES) @@ -155,19 +168,37 @@ def expr(m, tp): return avail m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=expr) def expr(m, tp): - avail = sum(m.DispatchSlackDown[p, tp] for p in m.FIRM_GENS if (p, tp) in m.GEN_TPS) + STORAGE_GENS = getattr(m, 'STORAGE_GENS', []) + # all regular generators; omit storage because they'll be added separately if needed + avail = sum( + 
m.DispatchSlackDown[g, tp]
+            for g in m.FIRM_GENS
+            if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS
+        )
         if m.options.reserves_from_storage:
             if hasattr(m, 'BatterySlackDown'):
                 avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES)
             if hasattr(m, 'HydrogenSlackDown'):
                 avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES)
+            # standard storage module (can stop producing power and raise charging to max)
+            avail += sum(
+                m.DispatchSlackDown[g, tp]
+                + m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g]
+                - m.ChargeStorage[g, tp]
+                for g in STORAGE_GENS
+                if (g, tp) in m.GEN_TPS
+            )
+
         if m.options.reserves_from_demand_response:
             if hasattr(m, 'DemandDownReserves'):
                 avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES)
             if hasattr(m, 'ShiftDemand'):
                 # avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES)
-                ub = 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp]
-                avail += sum(ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES)
+                avail += sum(
+                    24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp]
+                    - m.ShiftDemand[z, tp]
+                    for z in m.LOAD_ZONES
+                )
         # note: we currently ignore down-reserves (option of increasing consumption)
         # from EVs since it's not clear how high they could go; we could revisit this if
         # down-reserves have a positive price at equilibrium (probably won't)
@@ -225,6 +256,6 @@ def expr(m, tp):
 # switch_data.load_aug(
 #     filename=os.path.join(inputs_dir, 'reserve_requirements.tab'),
 #     auto_select=True,
-#     param=(m.regulating_reserve_requirement_mw))
+#     param=(m.RegulatingReserveRequirementMW))
diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py
index 435d240fe..e103c4e27 100644
--- a/switch_model/hawaii/scenario_data.py
+++ b/switch_model/hawaii/scenario_data.py
@@ -384,8 +384,11 @@ def write_tables(**args):
             cogen as gen_is_cogen,
             non_cycling as gen_non_cycling,
             variable_o_m * 1000.0 AS gen_variable_om,
-            CASE WHEN fuel IN ('SUN', 'WND', 'MSW') THEN fuel ELSE 'multiple' END AS gen_energy_source,
-            CASE WHEN fuel IN ('SUN', 'WND', 'MSW') THEN null ELSE {flhr} END AS gen_full_load_heat_rate
+            CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN fuel ELSE 'multiple' END AS gen_energy_source,
+            CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN null ELSE {flhr} END AS gen_full_load_heat_rate,
+            gen_storage_efficiency,
+            gen_storage_energy_to_power_ratio,
+            gen_storage_max_cycles_per_year
         FROM study_projects JOIN study_generator_info USING (technology)
         ORDER BY 2, 3, 1;
     """.format(fo=forced_outage_rate, flhr=full_load_heat_rate), args)
@@ -402,6 +405,8 @@ def write_tables(**args):
 
     # NOTE: these costs must be expressed in $/MW, $/MWh or $/MW-year,
     # not $/kW, $/kWh or $/kW-year.
+    # NOTE: for now, we only specify storage costs per unit of power, not
+    # per unit of energy, so we insert $0 as the energy cost here. 
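+    # The rows written below then look like this (hypothetical values, for
+    # illustration only):
+    #   GENERATION_PROJECT  build_year  gen_overnight_cost  gen_storage_energy_overnight_cost  gen_fixed_om
+    #   Oahu_Battery        2045        400000.0            0.0                                10000.0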
write_table('gen_build_costs.tab', """ WITH gen_build_costs AS ( SELECT @@ -410,6 +415,8 @@ def write_tables(**args): c.capital_cost_per_kw * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) AS gen_overnight_cost, + CASE WHEN i.gen_storage_efficiency IS NULL THEN NULL ELSE 0.0 END + AS gen_storage_energy_overnight_cost, i.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) AS gen_fixed_o_m FROM study_generator_info i @@ -425,12 +432,14 @@ def write_tables(**args): build_year, sum(proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_overnight_cost, + null AS gen_storage_energy_overnight_cost, sum(proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_fixed_om FROM study_projects JOIN proj_existing_builds USING (project_id) GROUP BY 1, 2 UNION - SELECT "GENERATION_PROJECT", build_year, gen_overnight_cost, gen_fixed_o_m + SELECT "GENERATION_PROJECT", build_year, gen_overnight_cost, + gen_storage_energy_overnight_cost, gen_fixed_o_m FROM gen_build_costs JOIN study_projects USING (technology) ORDER BY 1, 2; """, args) @@ -502,7 +511,7 @@ def write_tables(**args): cogen FROM study_generator_info ), all_fueled_techs AS ( - SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW') + SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW', 'Battery') ), gen_multiple_fuels AS ( SELECT DISTINCT technology, b.energy_source as fuel FROM all_fueled_techs t @@ -598,26 +607,25 @@ def write_tables(**args): ######################### # trans_dispatch # --- Not used --- - - + ######################### # batteries - # TODO: put these data in a database and write a .tab file instead - bat_years = 'BATTERY_CAPITAL_COST_YEARS' - bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' - write_dat_file( - 'batteries.dat', - sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]), - args - ) - if bat_years in args and bat_cost in args: - # annual costs were provided -- write those to a tab file - write_tab_file( - 'battery_capital_cost.tab', - headers=[bat_years, bat_cost], - data=zip(args[bat_years], args[bat_cost]), - arguments=args - ) + # (now included as standard storage projects) + # bat_years = 'BATTERY_CAPITAL_COST_YEARS' + # bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' + # write_dat_file( + # 'batteries.dat', + # sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]), + # args + # ) + # if bat_years in args and bat_cost in args: + # # annual costs were provided -- write those to a tab file + # write_tab_file( + # 'battery_capital_cost.tab', + # headers=[bat_years, bat_cost], + # data=zip(args[bat_years], args[bat_cost]), + # arguments=args + # ) ######################### # EV annual energy consumption From d21d4660247b5f1ad81a85482a53b3672005a82d Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 17 Jan 2018 10:58:11 -1000 Subject: [PATCH 12/51] copy spinning_reserves as starting point for spinning_reserves_advanced --- .../spinning_reserves_advanced.py | 536 ++++++++++++++++++ 1 file changed, 536 insertions(+) create mode 100644 switch_model/balancing/operating_reserves/spinning_reserves_advanced.py diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py new file mode 100644 index 000000000..b53c83472 --- /dev/null +++ 
b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py
@@ -0,0 +1,536 @@
+# Copyright (c) 2015-2017 The Switch Authors. All rights reserved.
+# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
+"""
+A simple and flexible model of spinning reserves that tracks the state of unit
+commitment and dispatched capacity to ensure that the generation fleet has
+enough up- and down- ramping capacity to satisfy reserve requirements. The
+unit commitment module is a prerequisite for spinning reserves. This
+formulation does not consider ramping speed or duration requirements, just MW
+of upward and downward ramping capability.
+
+Spinning reserve requirements can be customized through use of configuration
+parameters and can include n-1 contingencies (either from generation units or
+entire generation plants), as well as variability of load and variable
+renewable resources. This lumps together regulating reserves, load following
+reserves, and contingency reserves without distinguishing their timescales or
+required response duration. Operating reserves at timescales with slower
+responses for load following or longer-term recovery from contingencies are not
+included here.
+
+Most regions and countries use distinct terminology for reserves products and
+distinct procedures for determining reserve requirements. This module provides
+a simple approach to spinning reserve requirements, which can be extended by
+other modules via registering with dynamic lists. Detailed regional studies may
+need to write their own reserve modules to reflect specific regional reserve
+definitions and policies.
+
+Notes:
+
+This formulation only considers ramping capacity (MW), not duration or speed.
+The lack of duration requirements could cause problems if a significant amount
+of capacity is energy limited such as demand response, storage, or hydro.
+California now has a duration requirement of 3 hours for some classes of
+operating reserves. The lack of ramping speed could cause issues if the
+generators that are earmarked for providing spinning reserves have significant
+differences in ramping speeds that are important to account for. This
+formulation could be extended in the future to break reserve products into
+different categories based on overall response time (ramping speed &
+telemetry), and specify different reserve requirements for various response
+times: <1sec, <1 min, <5min, <15min, <1hr, 1day.
+
+One standard (nonlinear) methodology for calculating reserve requirements
+looks something like: k * sqrt(sigma_load^2 + sigma_renewable^2), where k is a
+constant reflecting capacity requirements (typically in the range of 3-5), and
+sigma's denote standard deviation in units of MW. Depending on the study,
+sigma may be calculated on timescales of seconds to minutes. Several studies
+estimate the sigmas with linear approximations. Some studies set
+sigma_renewable as a function of renewable output, especially for wind where
+power output shows the highest variability in the 40-60% output range because
+that is the steepest section of its power production curve. This formulation
+is not used here because the sigma_renewable term would need to be
+approximated using renewable power output, making this equation non-linear
+with respect to dispatch decision variables.
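+
+(An illustrative calculation, not taken from the studies cited below: with
+k = 3, sigma_load = 20 MW and sigma_renewable = 30 MW, this rule would call
+for 3 * sqrt(20^2 + 30^2), or roughly 108 MW, of reserve capacity.)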
+
+Other studies have used linear equations for estimating reserve requirements:
+
+The Western Wind and Solar Integration study suggested a heuristic of 3% *
+load + 5% * renewable_output for spinning reserve capacity requirements, and
+the same amount for quick start capacity requirements.
+
+Halamay 2011 derives spinning reserve requirements of +2.1% / -2.8% of load
+and ~ +2% / -3% for renewables to balance natural variability, and derives
+non-spinning reserve requirements of +3.5% / -4.0% of load and ~ +/- 4% for
+renewables to balance hour-ahead forecast errors.
+
+Note: Most research appears to be headed towards dynamic and probabilistic
+techniques, rather than the static approximations used here.
+
+References on operating reserves follow.
+
+Ela, Erik, et al. "Evolution of operating reserve determination in wind power
+integration studies." Power and Energy Society General Meeting, 2010 IEEE.
+http://www.nrel.gov/docs/fy11osti/49100.pdf
+
+Milligan, Michael, et al. "Operating reserves and wind power integration: An
+international comparison." proc. 9th International Workshop on large-scale
+integration of wind power into power systems. 2010.
+http://www.nrel.gov/docs/fy11osti/49019.pdf
+
+Halamay, Douglas A., et al. "Reserve requirement impacts of large-scale
+integration of wind, solar, and ocean wave power generation." IEEE
+Transactions on Sustainable Energy 2.3 (2011): 321-328.
+http://nnmrec.oregonstate.edu/sites/nnmrec.oregonstate.edu/files/PES_GM_2010_HalamayVariability_y09m11d30h13m26_DAH.pdf
+
+Ibanez, Eduardo, Ibrahim Krad, and Erik Ela. "A systematic comparison of
+operating reserve methodologies." PES General Meeting| Conference &
+Exposition, 2014 IEEE. http://www.nrel.gov/docs/fy14osti/61016.pdf
+
+"""
+import os
+from pyomo.environ import *
+
+dependencies = (
+    'switch_model.timescales',
+    'switch_model.balancing.load_zones',
+    'switch_model.balancing.operating_reserves.areas',
+    'switch_model.financials',
+    'switch_model.energy_sources.properties',
+    'switch_model.generators.core.build',
+    'switch_model.generators.core.dispatch',
+    'switch_model.generators.core.commit.operate',
+)
+
+
+def define_arguments(argparser):
+    group = argparser.add_argument_group(__name__)
+    group.add_argument('--unit-contingency', default=False,
+        dest='unit_contingency', action='store_true',
+        help=("This will enable an n-1 contingency based on a single unit of "
+              "a generation project falling offline. Note: This creates a new "
+              "binary variable for each project and timepoint that has a "
+              "proj_unit_size specified.")
+    )
+    group.add_argument('--project-contingency', default=False,
+        dest='project_contingency', action='store_true',
+        help=("This will enable an n-1 contingency based on the entire "
+              "committed capacity of a generation project falling offline. "
+              "Unlike unit contingencies, this is a purely linear expression.")
+    )
+    group.add_argument('--spinning-requirement-rule', default=None,
+        dest='spinning_requirement_rule',
+        choices = ["Hawaii", "3+5"],
+        help=("Choose rules for spinning reserves requirements as a function "
+              "of variable renewable power and load. 
Hawaii uses rules "
+              "bootstrapped from the GE RPS study, and '3+5' requires 3% of "
+              "load and 5% of variable renewable output, based on the heuristic "
+              "described in the 2010 Western Wind and Solar Integration Study.")
+    )
+
+
+
+
+def define_dynamic_lists(m):
+    """
+    Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements
+    are lists of model components that contribute to spinning reserve
+    requirements in each balancing area and timepoint.
+
+    Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions are
+    lists of model components that help satisfy spinning reserve requirements
+    in each balancing area and timepoint.
+
+    Spinning_Reserve_Contingencies is a list of model components
+    describing maximum contingency events. Elements of this list will be
+    summarized into a MaximumContingency variable that will be added to the
+    Spinning_Reserve_Up_Requirements list.
+
+    Each component in every list needs to use units of MW and be indexed by:
+    (b, t) in BALANCING_AREA_TIMEPOINTS.
+    """
+    m.Spinning_Reserve_Up_Requirements = []
+    m.Spinning_Reserve_Down_Requirements = []
+    m.Spinning_Reserve_Up_Provisions = []
+    m.Spinning_Reserve_Down_Provisions = []
+    m.Spinning_Reserve_Contingencies = []
+
+
+def gen_unit_contingency(m):
+    """
+    Add components for unit-level contingencies. A generation project can
+    include one or more discretely sized generation units. This will model
+    contingencies of individual generation units that have discrete sizes
+    specified. Caution, this adds binary variables to the model for every
+    GEN_TPS for DISCRETELY_SIZED_GENS. This many binary variables can impact
+    runtime.
+
+    UNIT_CONTINGENCY_DISPATCH_POINTS is a subset of GEN_TPS for
+    DISCRETELY_SIZED_GENS
+
+    GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a binary
+    variable that tracks whether a generation project has at least one unit
+    committed.
+
+    Enforce_GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a
+    constraint that enforces the tracking behavior of GenIsCommitted.
+
+    GenUnitLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a
+    variable that tracks the size of the largest contingency in each balancing
+    area, accounting for all of the discretely sized units that are currently
+    committed. This is added to the dynamic list Spinning_Reserve_Contingencies.
+
+    Enforce_GenUnitLargestContingency[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS]
+    is a constraint that enforces the behavior of GenUnitLargestContingency,
+    by making GenUnitLargestContingency >= the capacity of each of the
+    committed units in its balancing area.
+
+    """
+    # UNIT_CONTINGENCY_DISPATCH_POINTS duplicates
+    # GEN_DISPATCH_POINTS_DISCRETE from generators.core.commit.discrete. I
+    # justify the duplication because I don't think discrete unit commitment
+    # should be a prerequisite for this functionality.
+    m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set(
+        initialize=m.GEN_TPS,
+        filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS
+    )
+    m.GenIsCommitted = Var(
+        m.UNIT_CONTINGENCY_DISPATCH_POINTS,
+        within=Binary,
+        doc="Stores the status of unit commitment as a binary variable."
+    )
+    m.Enforce_GenIsCommitted = Constraint(
+        m.UNIT_CONTINGENCY_DISPATCH_POINTS,
+        rule=lambda m, g, tp:
+            m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * (
+                m._gen_max_cap_for_binary_constraints
+                if g not in m.CAPACITY_LIMITED_GENS
+                else m.gen_capacity_limit_mw[g]
+            )
+    )
+    m.GenUnitLargestContingency = Var(
+        m.BALANCING_AREA_TIMEPOINTS,
+        doc="Largest generating unit that could drop offline.")
+    def Enforce_GenUnitLargestContingency_rule(m, g, t):
+        b = m.zone_balancing_area[m.gen_load_zone[g]]
+        return (m.GenUnitLargestContingency[b,t] >=
+                m.GenIsCommitted[g, t] * m.gen_unit_size[g])
+    m.Enforce_GenUnitLargestContingency = Constraint(
+        m.UNIT_CONTINGENCY_DISPATCH_POINTS,
+        rule=Enforce_GenUnitLargestContingency_rule,
+        doc=("Force GenUnitLargestContingency to be at least as big as the "
+             "maximum unit contingency.")
+    )
+    m.Spinning_Reserve_Contingencies.append('GenUnitLargestContingency')
+
+
+def gen_project_contingency(m):
+    """
+    Add components for project-level contingencies based on committed capacity.
+    A generation project can include one or more discretely sized generation
+    units. This will model contingencies of entire generation projects -
+    basically entire plants tripping offline, rather than individual
+    generation units in a plant tripping offline.
+
+    GenProjectLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a
+    variable that tracks the size of the largest contingency in each balancing
+    area, accounting for all of the capacity that is committed. This is
+    added to the dynamic list Spinning_Reserve_Contingencies.
+
+    Enforce_GenProjectLargestContingency[(g,t) in GEN_TPS] is a constraint
+    that enforces the behavior of GenProjectLargestContingency by making
+    GenProjectLargestContingency >= DispatchGen
+    for each generation project in a balancing area. If a generation project
+    is capable of providing upward reserves, then CommitGenSpinningReservesUp
+    is added to the right hand side.
+
+    """
+    m.GenProjectLargestContingency = Var(
+        m.BALANCING_AREA_TIMEPOINTS,
+        doc="Largest generating project that could drop offline.")
+    def Enforce_GenProjectLargestContingency_rule(m, g, t):
+        b = m.zone_balancing_area[m.gen_load_zone[g]]
+        if m.gen_can_provide_spinning_reserves[g]:
+            return m.GenProjectLargestContingency[b, t] >= \
+                m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t]
+        else:
+            return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t]
+    m.Enforce_GenProjectLargestContingency = Constraint(
+        m.GEN_TPS,
+        rule=Enforce_GenProjectLargestContingency_rule,
+        doc=("Force GenProjectLargestContingency to be at least as big as the "
+             "maximum generation project contingency.")
+    )
+    m.Spinning_Reserve_Contingencies.append('GenProjectLargestContingency')
+
+
+def hawaii_spinning_reserve_requirements(m):
+    # This may be more appropriate for a hawaii submodule until it is
+    # better documented and referenced. 
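+    # (Illustrative example of the rule coded below, with hypothetical
+    # numbers: a 100 MW wind project at a timepoint with capacity factor 0.5
+    # would contribute 100 * min(1.0 * 0.5, 0.21624407), or about 21.6 MW,
+    # to the up-reserve requirement.)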
+ # these parameters were found by regressing the reserve requirements from + # the GE RPS Study against wind and solar conditions each hour (see + # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/source_data/ + # reserve_requirements_oahu_scenarios charts.xlsx and + # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/ + # fit_renewable_reserves.ipynb ) + # TODO: supply these parameters in input files + m.var_gen_power_reserve = Param( + m.VARIABLE_GENS, default=1.0, + doc=("Spinning reserves required to back up variable renewable " + "generators, as fraction of potential output.") + ) + def var_gen_cap_reserve_limit_default(m, g): + if m.gen_energy_source[g] == 'Solar': + return 0.21288916 + elif m.gen_energy_source[g] == 'Wind': + return 0.21624407 + else: + raise RuntimeError() + m.var_gen_cap_reserve_limit = Param( + m.VARIABLE_GENS, + default=var_gen_cap_reserve_limit_default, + doc="Maximum spinning reserves required, as fraction of installed capacity" + ) + m.HawaiiVarGenUpSpinningReserveRequirement = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum( + m.GenCapacityInTP[g, t] + * min( + m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], + m.var_gen_cap_reserve_limit[g] + ) + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS and b == m.zone_balancing_area[m.gen_load_zone[g]]), + doc="The spinning reserves for backing up variable generation with Hawaii rules." + ) + m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement') + + def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t): + if 'WithdrawFromCentralGrid' in dir(m): + load = m.WithdrawFromCentralGrid + else: + load = m.lz_demand_mw + return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]) + m.HawaiiLoadDownSpinningReserveRequirement = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=HawaiiLoadDownSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement') + + +def nrel_3_5_spinning_reserve_requirements(m): + """ + NREL35VarGenSpinningReserveRequirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is an expression for upward and downward spinning reserve requirements of + 3% of load plus 5% of renewable output, based on a heuristic described in + NREL's 2010 Western Wind and Solar Integration study. It is added to the + Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements + lists. If the local_td module is available with DER accounting, load will + be set to WithdrawFromCentralGrid. Otherwise load will be set to + lz_demand_mw. + """ + def NREL35VarGenSpinningReserveRequirement_rule(m, b, t): + if 'WithdrawFromCentralGrid' in dir(m): + load = m.WithdrawFromCentralGrid + else: + load = m.lz_demand_mw + return (0.03 * sum(load[z, t] for z in m.LOAD_ZONES + if b == m.zone_balancing_area[z]) + + 0.05 * sum(m.DispatchGen[g, t] for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS and + b == m.zone_balancing_area[m.gen_load_zone[g]])) + m.NREL35VarGenSpinningReserveRequirement = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=NREL35VarGenSpinningReserveRequirement_rule + ) + m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement') + m.Spinning_Reserve_Down_Requirements.append('NREL35VarGenSpinningReserveRequirement') + + +def define_components(m): + """ + contingency_safety_factor is a parameter that increases the contingency + requirements. 
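+    (For instance, a safety factor of 2.0 turns a 180 MW largest-unit
+    contingency into a 360 MW up-reserve requirement -- illustrative numbers
+    only.)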
By default this is set to 2.0 to prevent the largest
+    generator from providing reserves for itself.
+
+    gen_can_provide_spinning_reserves[g] is a binary flag indicating whether
+    the project is allowed to provide spinning reserves.
+
+    SPINNING_RESERVE_GEN_TPS is a subset of GEN_TPS of generators that can
+    provide spinning reserves based on gen_can_provide_spinning_reserves.
+
+    CommitGenSpinningReservesUp[(g,t) in SPINNING_RESERVE_GEN_TPS] is a
+    decision variable of how much upward spinning reserve capacity to commit
+    (in MW).
+
+    CommitGenSpinningReservesDown[(g,t) in SPINNING_RESERVE_GEN_TPS] is a
+    corresponding variable for downward spinning reserves.
+
+    CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_RESERVE_GEN_TPS] and
+    CommitGenSpinningReservesDown_Limit constrain the CommitGenSpinningReserves
+    variables based on DispatchSlackUp and DispatchSlackDown.
+
+    CommittedSpinningReserveUp[(b,t) in BALANCING_AREA_TIMEPOINTS] and
+    CommittedSpinningReserveDown are expressions summarizing the
+    CommitGenSpinningReserves variables for generators within each balancing
+    area.
+
+    Depending on the configuration parameters unit_contingency,
+    project_contingency and spinning_requirement_rule, other components may be
+    added by other functions which are documented above.
+    """
+    m.contingency_safety_factor = Param(default=2.0,
+        doc=("The spinning reserve requirement will be set to this value "
+             "times the maximum contingency. This defaults to 2 to ensure "
+             "that the largest generator cannot be providing contingency "
+             "reserves for itself."))
+    m.gen_can_provide_spinning_reserves = Param(
+        m.GENERATION_PROJECTS, within=Boolean, default=True
+    )
+    m.SPINNING_RESERVE_GEN_TPS = Set(
+        dimen=2,
+        initialize=m.GEN_TPS,
+        filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g])
+    # CommitGenSpinningReservesUp and CommitGenSpinningReservesDown are
+    # variables instead of aliases to DispatchSlackUp & DispatchSlackDown
+    # because they may need to take on lower values to reduce the
+    # project-level contingencies, especially when discrete unit commitment is
+    # enabled, and committed capacity may exceed the amount of capacity that
+    # is strictly needed. Having these as variables also flags them for
+    # automatic export in model dumps and tab files, and opens up the
+    # possibility of further customizations like adding variable costs for
+    # spinning reserve provision.
+    m.CommitGenSpinningReservesUp = Var(
+        m.SPINNING_RESERVE_GEN_TPS,
+        within=NonNegativeReals
+    )
+    m.CommitGenSpinningReservesDown = Var(
+        m.SPINNING_RESERVE_GEN_TPS,
+        within=NonNegativeReals
+    )
+    m.CommitGenSpinningReservesUp_Limit = Constraint(
+        m.SPINNING_RESERVE_GEN_TPS,
+        rule=lambda m, g, t: \
+            m.CommitGenSpinningReservesUp[g,t] <= m.DispatchSlackUp[g, t]
+    )
+    m.CommitGenSpinningReservesDown_Limit = Constraint(
+        m.SPINNING_RESERVE_GEN_TPS,
+        rule=lambda m, g, t: \
+            m.CommitGenSpinningReservesDown[g,t] <= m.DispatchSlackDown[g, t]
+    )
+
+    # Sum of spinning reserve capacity per balancing area and timepoint. 
+ m.CommittedSpinningReserveUp = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: \ + sum(m.CommitGenSpinningReservesUp[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g,t) in m.SPINNING_RESERVE_GEN_TPS + ) + ) + m.Spinning_Reserve_Up_Provisions.append('CommittedSpinningReserveUp') + m.CommittedSpinningReserveDown = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: \ + sum(m.CommitGenSpinningReservesDown[g, t] + for z in m.ZONES_IN_BALANCING_AREA[b] + for g in m.GENS_IN_ZONE[z] + if (g,t) in m.SPINNING_RESERVE_GEN_TPS + ) + ) + m.Spinning_Reserve_Down_Provisions.append('CommittedSpinningReserveDown') + + if m.options.unit_contingency: + gen_unit_contingency(m) + if m.options.project_contingency: + gen_project_contingency(m) + if m.options.spinning_requirement_rule == 'Hawaii': + hawaii_spinning_reserve_requirements(m) + elif m.options.spinning_requirement_rule == '3+5': + nrel_3_5_spinning_reserve_requirements(m) + + +def define_dynamic_components(m): + """ + MaximumContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a variable that + tracks the size of the largest contingency in each balancing area, + accounting for every contingency that has been registered with + Spinning_Reserve_Contingencies. + + BALANCING_AREA_TIMEPOINT_CONTINGENCIES is a set of (b, t, contingency) formed + from the cross product of the set BALANCING_AREA_TIMEPOINTS and the dynamic + list Spinning_Reserve_Contingencies. + + Enforce_MaximumContingency[(b,t,contingency) in BALANCING_AREA_TIMEPOINT_CONTINGENCIES] + is a constraint that enforces the behavior of MaximumContingency by making + MaximumContingency >= contingency for each contingency registered in the + dynamic list Spinning_Reserve_Contingencies. + + Satisfy_Spinning_Reserve_Up_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is a constraint that ensures upward spinning reserve requirements are + being satisfied based on the sums of the two dynamic lists + Spinning_Reserve_Up_Provisions and Spinning_Reserve_Up_Requirements. + + Satisfy_Spinning_Reserve_Down_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] + is a matching constraint that uses the downward reserve lists. 
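+
+ For example (an illustrative reading of the constraints below, assuming no
+ other modules have registered list entries), if Spinning_Reserve_Up_Requirements
+ holds ['MaximumContingency'] and Spinning_Reserve_Up_Provisions holds
+ ['CommittedSpinningReserveUp'], then the up-reserve constraint reduces to
+ MaximumContingency[b,t] <= CommittedSpinningReserveUp[b,t] for every
+ balancing area b and timepoint t.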
+ """ + m.MaximumContingency = Var( + m.BALANCING_AREA_TIMEPOINTS, + doc=("Maximum of the registered Spinning_Reserve_Contingencies, after " + "multiplying by contingency_safety_factor.") + ) + m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES = Set( + initialize=m.BALANCING_AREA_TIMEPOINTS * m.Spinning_Reserve_Contingencies, + doc=("The set of spinning reserve contingencies, copied from the " + "dynamic list Spinning_Reserve_Contingencies to simplify the " + "process of defining one constraint per contingency in the list.") + ) + m.Enforce_MaximumContingency = Constraint( + m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES, + rule=lambda m, b, t, contingency: + m.MaximumContingency[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + ) + m.Spinning_Reserve_Up_Requirements.append('MaximumContingency') + + m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: \ + sum(getattr(m, requirement)[b,t] + for requirement in m.Spinning_Reserve_Up_Requirements + ) <= + sum(getattr(m, provision)[b,t] + for provision in m.Spinning_Reserve_Up_Provisions + ) + ) + m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: \ + sum(getattr(m, requirement)[b,t] + for requirement in m.Spinning_Reserve_Down_Requirements + ) <= + sum(getattr(m, provision)[b,t] + for provision in m.Spinning_Reserve_Down_Provisions + ) + ) + + +def load_inputs(m, switch_data, inputs_dir): + """ + All files & columns are optional. + + generation_projects_info.tab + GENERATION_PROJECTS, ... gen_can_provide_spinning_reserves + + spinning_reserve_params.dat may override the default value of + contingency_safety_factor. Note that is is a .dat file, not a .tab file. + """ + switch_data.load_aug( + filename=os.path.join(inputs_dir, 'generation_projects_info.tab'), + auto_select=True, + optional_params=['gen_can_provide_spinning_reserves'], + param=(m.gen_can_provide_spinning_reserves) + ) + switch_data.load_aug( + filename=os.path.join(inputs_dir, 'spinning_reserve_params.dat'), + optional=True, + ) + From 54856e0edb5dbe22c9ecf9e2d84bbed50a61c147 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 17 Jan 2018 11:02:08 -1000 Subject: [PATCH 13/51] allow multiple spinning reserve products and targets --- .../spinning_reserves_advanced.py | 440 ++++++++++++------ switch_model/generators/core/build.py | 3 + .../generators/core/commit/discrete.py | 15 +- switch_model/hawaii/demand_response_simple.py | 85 +++- switch_model/hawaii/ev.py | 58 ++- switch_model/hawaii/scenario_data.py | 39 ++ switch_model/utilities.py | 9 + 7 files changed, 468 insertions(+), 181 deletions(-) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index b53c83472..e84671687 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -87,7 +87,11 @@ """ import os +from collections import defaultdict from pyomo.environ import * +from switch_model.utilities import iteritems + + dependencies = ( 'switch_model.timescales', @@ -103,21 +107,18 @@ def define_arguments(argparser): group = argparser.add_argument_group(__name__) - group.add_argument('--unit-contingency', default=False, - dest='unit_contingency', action='store_true', + group.add_argument('--unit-contingency', default=False, action='store_true', help=("This will enable an n-1 contingency 
based on a single unit of "
 "a generation project falling offline. Note: This creates a new "
- "binary variable for each project and timepoint that has a "
- "proj_unit_size specified.")
+ "binary variable for each timepoint for each generation project "
+ "that has a gen_unit_size specified.")
 )
- group.add_argument('--project-contingency', default=False,
- dest='project_contingency', action='store_true',
+ group.add_argument('--project-contingency', default=False, action='store_true',
 help=("This will enable an n-1 contingency based on the entire "
 "committed capacity of a generation project falling offline. "
 "Unlike unit contingencies, this is a purely linear expression.")
 )
 group.add_argument('--spinning-requirement-rule', default=None,
- dest='spinning_requirement_rule',
 choices = ["Hawaii", "3+5"],
 help=("Choose rules for spinning reserves requirements as a function "
 "of variable renewable power and load. Hawaii uses rules "
@@ -125,33 +126,63 @@
 "load and 5% of variable renewable output, based on the heuristic "
 "described in the 2010 Western Wind and Solar Integration Study.")
 )
+ # TODO: define these inputs in data files
+ group.add_argument(
+ '--contingency-reserve-type',
+ default='spinning',
+ help=
+ "Type of reserves to use to meet the contingency reserve requirements "
+ "defined for generation projects and sometimes for loss-of-load events "
+ "(e.g., 'contingency' or 'spinning'); default is 'spinning'."
+ )
+ group.add_argument(
+ '--regulating-reserve-type',
+ default='spinning',
+ help=
+ "Type of reserves to use to meet the regulating reserve requirements "
+ "defined by the spinning requirements rule (e.g., 'spinning' or "
+ "'contingency'); default is 'spinning'."
+ )


 def define_dynamic_lists(m):
 """
- Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements
- are lists of model components that contribute to spinning reserve
- requirements in each balancing area and timepoint.
+ Spinning_Reserve_Up_Requirements, Spinning_Reserve_Down_Requirements,
+ Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions are
+ lists of model components that contribute to the requirement or provision
+ of operating reserves. Each registered component is indexed by reserve
+ type (usually "regulation" or "contingency"), balancing area and timepoint.
+ During define_dynamic_components these lists are aggregated into dicts
+ keyed by (reserve type, balancing area, timepoint); the dicts are set up
+ as defaultdicts, so missing entries are automatically treated as zero when
+ nothing has been registered for a particular type of reserves.
+
+ The Requirements lists contain model components that increase operating
+ reserve requirements in each balancing area and timepoint.

- Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions are
- lists of model components that help satisfy spinning reserve requirements
- in each balancing area and timepoint.
+ The Provisions lists contain model components that help satisfy operating
+ reserve requirements in each balancing area and timepoint.

- Spinning_Reserve_Contingencies is a list of model components
- describing maximum contingency events. Elements of this list will be
- summarized into a Maximumcontingency variable that will be added to the
- Spinning_Reserve_Up_Requirements list.
+ Spinning_Reserve_Up_Contingencies and Spinning_Reserve_Down_Contingencies
+ are lists of model components describing maximum contingency events.
+ Elements of these lists are summarized into MaximumContingencyUp and
+ MaximumContingencyDown variables that are registered as contingency-type
+ reserve requirements.

- Each component in every list needs to use units of MW and be indexed by:
- (b, t) in BALANCING_AREA_TIMEPOINTS.
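+
+ For example (hypothetical component name), another module could register a
+ regulation-type up-reserve requirement, indexed the same way as the
+ expressions in this module, with:
+
+     m.MyRegulationRequirement = Expression(
+         ['regulation'], m.BALANCING_AREA_TIMEPOINTS, rule=...)
+     m.Spinning_Reserve_Up_Requirements.append('MyRegulationRequirement')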
+ Each component in the Requirements and Provisions lists needs to use units + of MW and be indexed by reserve type, balancing area and timepoint. Missing + entries will be treated as zero (no reserves required or no reserves available). + + Each component in the Contingencies list should be in MW and indexed by + (ba, tp) in BALANCING_AREA_TIMEPOINTS. """ m.Spinning_Reserve_Up_Requirements = [] m.Spinning_Reserve_Down_Requirements = [] m.Spinning_Reserve_Up_Provisions = [] m.Spinning_Reserve_Down_Provisions = [] - m.Spinning_Reserve_Contingencies = [] + + m.Spinning_Reserve_Up_Contingencies = [] + m.Spinning_Reserve_Down_Contingencies = [] def gen_unit_contingency(m): @@ -185,12 +216,13 @@ def gen_unit_contingency(m): """ # UNIT_CONTINGENCY_DISPATCH_POINTS duplicates - # GEN_DISPATCH_POINTS_DISCRETE from generators.core.commit.discrete. I + # DISCRETE_GEN_TPS from generators.core.commit.discrete. I # justify the duplication because I don't think discrete unit commitment # should be a prerequisite for this functionality. m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( - initialize=m.GEN_TPS, - filter=lambda m, g, tp: g in m.DISCRETELY_SIZED_GENS + dimen=2, + initialize=lambda m: + [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] ) m.GenIsCommitted = Var( m.UNIT_CONTINGENCY_DISPATCH_POINTS, @@ -206,6 +238,9 @@ def gen_unit_contingency(m): else m.gen_capacity_limit_mw[g] ) ) + # TODO: would it be faster to add all generator contingencies directly + # to Spinning_Reserve_Contingencies instead of introducing this intermediate + # variable and constraint? m.GenUnitLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS, doc="Largest generating unit that could drop offline.") @@ -219,7 +254,7 @@ def Enforce_GenUnitLargestContingency_rule(m, g, t): doc=("Force GenUnitLargestContingency to be at least as big as the " "maximum unit contingency.") ) - m.Spinning_Reserve_Contingencies.append('GenUnitLargestContingency') + m.Spinning_Reserve_Up_Contingencies.append('GenUnitLargestContingency') def gen_project_contingency(m): @@ -259,8 +294,7 @@ def Enforce_GenProjectLargestContingency_rule(m, g, t): doc=("Force GenProjectLargestContingency to be at least as big as the " "maximum generation project contingency.") ) - m.Spinning_Reserve_Contingencies.append('GenProjectLargestContingency') - + m.Spinning_Reserve_Up_Contingencies.append('GenProjectLargestContingency') def hawaii_spinning_reserve_requirements(m): # This may be more appropriate for a hawaii submodule until it is @@ -271,49 +305,61 @@ def hawaii_spinning_reserve_requirements(m): # reserve_requirements_oahu_scenarios charts.xlsx and # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/ # fit_renewable_reserves.ipynb ) - # TODO: supply these parameters in input files + # TODO: supply all the parameters for this function in input files + + # Calculate and register regulating reserve requirements + # (currently only considers variable generation, only underforecasting) + # (could eventually use some linearized quadratic formulation based + # on load, magnitude of renewables and geographic dispersion of renewables) m.var_gen_power_reserve = Param( m.VARIABLE_GENS, default=1.0, doc=("Spinning reserves required to back up variable renewable " "generators, as fraction of potential output.") ) def var_gen_cap_reserve_limit_default(m, g): - if m.gen_energy_source[g] == 'Solar': + if m.gen_energy_source[g] == 'SUN': return 0.21288916 - elif m.gen_energy_source[g] == 'Wind': + elif m.gen_energy_source[g] == 'WND': return 0.21624407 else: - 
raise RuntimeError()
+ raise ValueError(
+ "Unable to calculate reserve requirement for energy source {}".format(m.gen_energy_source[g])
+ )
 m.var_gen_cap_reserve_limit = Param(
 m.VARIABLE_GENS,
 default=var_gen_cap_reserve_limit_default,
 doc="Maximum spinning reserves required, as fraction of installed capacity"
 )
 m.HawaiiVarGenUpSpinningReserveRequirement = Expression(
+ [m.options.regulating_reserve_type],
 m.BALANCING_AREA_TIMEPOINTS,
- rule=lambda m, b, t: sum(
+ rule=lambda m, rt, b, t: sum(
 m.GenCapacityInTP[g, t]
 * min(
 m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t],
 m.var_gen_cap_reserve_limit[g]
 )
- for g in m.VARIABLE_GENS
- if (g, t) in m.VARIABLE_GEN_TPS and b == m.zone_balancing_area[m.gen_load_zone[g]]),
+ for z in m.ZONES_IN_BALANCING_AREA[b]
+ for g in m.VARIABLE_GENS_IN_ZONE[z]
+ if (g, t) in m.VARIABLE_GEN_TPS),
 doc="The spinning reserves for backing up variable generation with Hawaii rules."
 )
 m.Spinning_Reserve_Up_Requirements.append('HawaiiVarGenUpSpinningReserveRequirement')

- def HawaiiLoadDownSpinningReserveRequirement_rule(m, b, t):
- if 'WithdrawFromCentralGrid' in dir(m):
- load = m.WithdrawFromCentralGrid
- else:
- load = m.lz_demand_mw
- return 0.10 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z])
- m.HawaiiLoadDownSpinningReserveRequirement = Expression(
- m.BALANCING_AREA_TIMEPOINTS,
- rule=HawaiiLoadDownSpinningReserveRequirement_rule
+ # Calculate and register loss-of-load (down) contingencies
+ if hasattr(m, 'WithdrawFromCentralGrid'):
+ rule = lambda m, ba, tp: 0.10 * sum(
+ m.WithdrawFromCentralGrid[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba]
+ )
+ else:
+ # TODO: include effect of demand response here
+ rule = lambda m, ba, tp: 0.10 * sum(
+ m.zone_demand_mw[z, tp] for z in m.ZONES_IN_BALANCING_AREA[ba]
+ )
+ m.HawaiiLoadDownContingency = Expression(
+ m.BALANCING_AREA_TIMEPOINTS, rule=rule
 )
- m.Spinning_Reserve_Down_Requirements.append('HawaiiLoadDownSpinningReserveRequirement')
+ m.Spinning_Reserve_Down_Contingencies.append('HawaiiLoadDownContingency')


 def nrel_3_5_spinning_reserve_requirements(m):
@@ -325,19 +371,26 @@ def nrel_3_5_spinning_reserve_requirements(m):
 Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements
 lists. If the local_td module is available with DER accounting, load will
 be set to WithdrawFromCentralGrid. Otherwise load will be set to
- lz_demand_mw.
+ zone_demand_mw.
 """
-
- def NREL35VarGenSpinningReserveRequirement_rule(m, b, t):
- if 'WithdrawFromCentralGrid' in dir(m):
+
+ def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t):
+ try:
 load = m.WithdrawFromCentralGrid
- else:
- load = m.lz_demand_mw
- return (0.03 * sum(load[z, t] for z in m.LOAD_ZONES
- if b == m.zone_balancing_area[z])
- + 0.05 * sum(m.DispatchGen[g, t] for g in m.VARIABLE_GENS
- if (g, t) in m.VARIABLE_GEN_TPS and
- b == m.zone_balancing_area[m.gen_load_zone[g]]))
+ except AttributeError:
+ load = m.zone_demand_mw
+ return (
+ 0.03 * sum(load[z, t] for z in m.LOAD_ZONES_IN_BALANCING_AREA[b])
+ +
+ 0.05 * sum(
+ m.DispatchGen[g, t]
+ for z in m.ZONES_IN_BALANCING_AREA[b]
+ for g in m.VARIABLE_GENS_IN_ZONE[z]
+ if (g, t) in m.VARIABLE_GEN_TPS
+ )
+ )
 m.NREL35VarGenSpinningReserveRequirement = Expression(
+ [m.options.regulating_reserve_type],
 m.BALANCING_AREA_TIMEPOINTS,
 rule=NREL35VarGenSpinningReserveRequirement_rule
 )
@@ -351,20 +404,28 @@ def define_components(m):
 """
 contingency_safety_factor is a parameter that increases the contingency
- requirements. By default this is set to 2.0 to prevent the largest
- generator from providing reserves for itself.
+ requirements. By default this is set to 1.0, which provides n-1 security
+ for the largest committed generator.
- gen_can_provide_spinning_reserves[g] is a binary flag indicating whether
- the project is allowed to provide spinning reserves.
+ GEN_SPINNING_RESERVE_TYPES is a set of all allowed reserve types for each generation
+ project. This is read from generation_projects_reserve_capability.tab.
+ If that file doesn't exist, this defaults to GENERATION_PROJECTS x {"spinning"}.
+
+ gen_reserve_type_max_share specifies the maximum amount of committed
+ capacity that can be used to provide each type of reserves. It is indexed
+ by GEN_SPINNING_RESERVE_TYPES. This is read from generation_projects_reserve_capability.tab
+ and defaults to 1 if not specified.
-
- SPINNING_RESERVE_GEN_TPS is a subset of GEN_TPS of generators that can
- provide spinning reserves based on gen_can_provide_spinning_reserves.
+
+ SPINNING_RESERVE_CAPABLE_GEN_TPS is a subset of GEN_TPS of generators
+ that can provide spinning reserves, based on GEN_SPINNING_RESERVE_TYPES.

- CommitGenSpinningReservesUp[(g,t) in SPINNING_RESERVE_GEN_TPS] is a
+ CommitGenSpinningReservesUp[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] is a
 decision variable of how much upward spinning reserve capacity to commit
 (in MW).

- CommitGenSpinningReservesDown[(g,t) in SPINNING_RESERVE_GEN_TPS] is a
+ CommitGenSpinningReservesDown[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] is a
 corresponding variable for downward spinning reserves.

- CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_RESERVE_GEN_TPS] and
+ CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] and
 CommitGenSpinningReservesDown_Limit constrain the CommitGenSpinningReserves
 variables based on DispatchSlackUp and DispatchSlackDown.

@@ -377,68 +438,116 @@ def define_components(m):
 project_contingency and spinning_requirement_rule, other components may be
 added by other functions which are documented above.
 """
- m.contingency_safety_factor = Param(default=2.0,
+ m.contingency_safety_factor = Param(default=1.0,
 doc=("The spinning reserve requirement will be set to this value "
- "times the maximum contingency. This defaults to 2 to ensure "
- "that the largest generator cannot be providing contingency "
- "reserves for itself."))
- m.gen_can_provide_spinning_reserves = Param(
- m.GENERATION_PROJECTS, within=Boolean, default=True
+ "times the maximum contingency. This defaults to 1 to provide "
+ "n-1 security for the largest committed generator. "))
+
+ m.GEN_SPINNING_RESERVE_TYPES = Set(dimen=2)
+ m.gen_reserve_type_max_share = Param(
+ m.GEN_SPINNING_RESERVE_TYPES,
+ within=PercentFraction,
+ default=1.0
 )
- m.SPINNING_RESERVE_GEN_TPS = Set(
- dimen=2,
- initialize=m.GEN_TPS,
- filter=lambda m, g, t: m.gen_can_provide_spinning_reserves[g])
- # CommitGenSpinningReservesUp and CommitGenSpinningReservesDown are
- # variables instead of aliases to DispatchSlackUp & DispatchSlackDown
- # because they may need to take on lower values to reduce the
- # project-level contingencies, especially when discrete unit commitment is
- # enabled, and committed capacity may exceed the amount of capacity that
- # is strictly needed. Having these as variables also flags them for
- # automatic export in model dumps and tab files, and opens up the
- # possibility of further customizations like adding variable costs for
- # spinning reserve provision.
- m.CommitGenSpinningReservesUp = Var( - m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals - ) - m.CommitGenSpinningReservesDown = Var( - m.SPINNING_RESERVE_GEN_TPS, - within=NonNegativeReals + + # TODO: go back to calling all of these spinning reserves instead of operating reserves, + # since they all have to be backed by spinning (committed) capacity for now. Also prefix + # "reserve" with "spinning" everywhere, to distinguish from planning reserves. Then the + # terminology will be pretty similar to the current spinning_reserves module. + + # reserve types that are supplied by generation projects + # and generation projects that can provide reserves + # note: these are also the indexing sets of the above set arrays; maybe that could be used? + m.SPINNING_RESERVE_TYPES_FROM_GENS = Set(initialize=lambda m: set(rt for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES)) + m.SPINNING_RESERVE_CAPABLE_GENS = Set(initialize=lambda m: set(g for (g, rt) in m.GEN_SPINNING_RESERVE_TYPES)) + + # slice GEN_SPINNING_RESERVE_TYPES both ways for later use + def rule(m): + m.SPINNING_RESERVE_TYPES_FOR_GEN_dict = defaultdict(list) + m.GENS_FOR_SPINNING_RESERVE_TYPE_dict = defaultdict(list) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES: + m.SPINNING_RESERVE_TYPES_FOR_GEN_dict[g].append(rt) + m.GENS_FOR_SPINNING_RESERVE_TYPE_dict[rt].append(g) + m.build_spinning_reserve_indexed_sets = BuildAction(rule=rule) + + m.SPINNING_RESERVE_TYPES_FOR_GEN = Set( + m.SPINNING_RESERVE_CAPABLE_GENS, + rule=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g) + ) + m.GENS_FOR_SPINNING_RESERVE_TYPE = Set( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + rule=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt) ) + + # types, generators and timepoints when reserves could be supplied + m.SPINNING_RESERVE_TYPE_GEN_TPS = Set(dimen=3, initialize=lambda m: ( + (rt, g, tp) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES + for tp in m.TPS_FOR_GEN[g] + )) + # generators and timepoints when reserves could be supplied + m.SPINNING_RESERVE_CAPABLE_GEN_TPS = Set(dimen=2, initialize=lambda m: ( + (g, tp) + for g in m.SPINNING_RESERVE_CAPABLE_GENS + for tp in m.TPS_FOR_GEN[g] + )) + + # decide how much of each type of reserves to produce from each generator + # during each timepoint + m.CommitGenSpinningReservesUp = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + m.CommitGenSpinningReservesDown = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) + + # constrain reserve provision appropriately m.CommitGenSpinningReservesUp_Limit = Constraint( - m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesUp[g,t] <= m.DispatchSlackUp[g, t] + m.SPINNING_RESERVE_CAPABLE_GEN_TPS, + rule=lambda m, g, tp: + sum(m.CommitGenSpinningReservesUp[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) + <= + m.DispatchSlackUp[g, tp] + # storage can give more up response by stopping charging + + (m.ChargeStorage[g, tp] if g in getattr(m, 'STORAGE_GENS', []) else 0.0) ) m.CommitGenSpinningReservesDown_Limit = Constraint( - m.SPINNING_RESERVE_GEN_TPS, - rule=lambda m, g, t: \ - m.CommitGenSpinningReservesDown[g,t] <= m.DispatchSlackDown[g, t] + m.SPINNING_RESERVE_CAPABLE_GEN_TPS, + rule=lambda m, g, tp: + sum(m.CommitGenSpinningReservesDown[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) + <= + m.DispatchSlackDown[g, tp] + + ( # storage could give more down response by raising ChargeStorage to the maximum rate + (m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) + 
if g in getattr(m, 'STORAGE_GENS', []) + else 0.0 + ) ) - # Sum of spinning reserve capacity per balancing area and timepoint.. - m.CommittedSpinningReserveUp = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesUp[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + # Calculate total spinning reserves from generation projects, + # and add to the list of reserve provisions + def rule(m): + d = m.TotalGenSpinningReserves_dict = defaultdict(float) + for g, rt in m.GEN_SPINNING_RESERVE_TYPES: + ba = m.zone_balancing_area[m.gen_load_zone[g]] + for tp in m.TPS_FOR_GEN[g]: + d[rt, 'up', ba, tp] += m.CommitGenSpinningReservesUp[rt, g, tp] + d[rt, 'down', ba, tp] += m.CommitGenSpinningReservesDown[rt, g, tp] + m.TotalGenSpinningReserves_aggregate = BuildAction(rule=rule) + + m.TotalGenSpinningReservesUp = Expression( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.TotalGenSpinningReserves_dict.pop((rt, 'up', ba, tp), 0.0) ) - m.Spinning_Reserve_Up_Provisions.append('CommittedSpinningReserveUp') - m.CommittedSpinningReserveDown = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.CommitGenSpinningReservesDown[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.GENS_IN_ZONE[z] - if (g,t) in m.SPINNING_RESERVE_GEN_TPS - ) + m.TotalGenSpinningReservesDown = Expression( + m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, rt, ba, tp: + m.TotalGenSpinningReserves_dict.pop((rt, 'down', ba, tp), 0.0) ) - m.Spinning_Reserve_Down_Provisions.append('CommittedSpinningReserveDown') + m.Spinning_Reserve_Up_Provisions.append('TotalGenSpinningReservesUp') + m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') + + # define reserve requirements if m.options.unit_contingency: gen_unit_contingency(m) if m.options.project_contingency: @@ -473,43 +582,87 @@ def define_dynamic_components(m): Satisfy_Spinning_Reserve_Down_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] is a matching constraint that uses the downward reserve lists. 
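+
+ In this version these components are split by direction and reserve type:
+ MaximumContingencyUp and MaximumContingencyDown track the largest registered
+ up- and down-contingencies, and the Satisfy constraints are indexed by
+ (reserve type, balancing area, timepoint), so each reserve product is
+ balanced separately (see the code below).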
""" - m.MaximumContingency = Var( + + # TODO: add contingency down reserves (loss-of-load events) + + # define largest contingencies + m.MaximumContingencyUp = Var( + m.BALANCING_AREA_TIMEPOINTS, + doc=("Maximum of the registered Spinning_Reserve_Up_Contingencies, after " + "multiplying by contingency_safety_factor.") + ) + m.MaximumContingencyDown = Var( m.BALANCING_AREA_TIMEPOINTS, - doc=("Maximum of the registered Spinning_Reserve_Contingencies, after " + doc=("Maximum of the registered Spinning_Reserve_Down_Contingencies, after " "multiplying by contingency_safety_factor.") ) - m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES = Set( - initialize=m.BALANCING_AREA_TIMEPOINTS * m.Spinning_Reserve_Contingencies, - doc=("The set of spinning reserve contingencies, copied from the " - "dynamic list Spinning_Reserve_Contingencies to simplify the " - "process of defining one constraint per contingency in the list.") + m.Calculate_MaximumContingencyUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + m.Spinning_Reserve_Up_Contingencies, # list of contingency events + rule=lambda m, b, t, contingency: + m.MaximumContingencyUp[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] ) - m.Enforce_MaximumContingency = Constraint( - m.BALANCING_AREA_TIMEPOINT_CONTINGENCIES, + m.Calculate_MaximumContingencyDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + m.Spinning_Reserve_Down_Contingencies, # list of contingency events rule=lambda m, b, t, contingency: - m.MaximumContingency[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] + m.MaximumContingencyDown[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] ) - m.Spinning_Reserve_Up_Requirements.append('MaximumContingency') - m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( + # create reserve requirements equal to the largest contingencies + # (these could eventually be region-specific) + m.MaximumContingencyUpRequirement = Expression( + [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Up_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Up_Provisions - ) + rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp] ) - m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( + m.MaximumContingencyDownRequirement = Expression( + [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(getattr(m, requirement)[b,t] - for requirement in m.Spinning_Reserve_Down_Requirements - ) <= - sum(getattr(m, provision)[b,t] - for provision in m.Spinning_Reserve_Down_Provisions - ) + rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp] + ) + + m.Spinning_Reserve_Up_Requirements.append('MaximumContingencyUpRequirement') + m.Spinning_Reserve_Down_Requirements.append('MaximumContingencyDownRequirement') + + # aggregate the requirements for each type of reserves during each timepoint + def rule(m): + def makedict(m, lst): + # lst is the name of a dynamic list from which to aggregate components + d = defaultdict(float) + for comp in getattr(m, lst): + for key, val in iteritems(getattr(m, comp)): + d[key] += val + setattr(m, lst + '_dict', d) + makedict(m, 'Spinning_Reserve_Up_Requirements') + makedict(m, 'Spinning_Reserve_Down_Requirements') + makedict(m, 'Spinning_Reserve_Up_Provisions') + makedict(m, 'Spinning_Reserve_Down_Provisions') + m.Aggregate_Spinning_Reserve_Details = BuildAction(rule=rule) + + 
+ m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS = Set(
+ dimen=3,
+ rule=lambda m: m.Spinning_Reserve_Up_Requirements_dict.keys()
+ )
+ m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS = Set(
+ dimen=3,
+ rule=lambda m: m.Spinning_Reserve_Down_Requirements_dict.keys()
+ )
+
+ # satisfy all spinning reserve requirements
+ m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(
+ m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, rt, ba, tp:
+ m.Spinning_Reserve_Up_Provisions_dict.pop((rt, ba, tp), 0.0)
+ >=
+ m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp))
+ )
+ m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint(
+ m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, rt, ba, tp:
+ m.Spinning_Reserve_Down_Provisions_dict.pop((rt, ba, tp), 0.0)
+ >=
+ m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp))
 )


@@ -517,20 +670,21 @@ def load_inputs(m, switch_data, inputs_dir):
 """
 All files & columns are optional.

- generation_projects_info.tab
- GENERATION_PROJECTS, ... gen_can_provide_spinning_reserves
+ generation_projects_reserve_capability.tab
+ GENERATION_PROJECTS, RESERVE_TYPES, [gen_reserve_type_max_share]

 spinning_reserve_params.dat may override the default value of
- contingency_safety_factor. Note that is is a .dat file, not a .tab file.
+ contingency_safety_factor. Note that this is a .dat file, not a .tab file.
 """
 switch_data.load_aug(
- filename=os.path.join(inputs_dir, 'generation_projects_info.tab'),
+ filename=os.path.join(inputs_dir, 'generation_projects_reserve_capability.tab'),
 auto_select=True,
- optional_params=['gen_can_provide_spinning_reserves'],
- param=(m.gen_can_provide_spinning_reserves)
+ optional_params=['gen_reserve_type_max_share'],
+ index=m.GEN_SPINNING_RESERVE_TYPES,
+ param=(m.gen_reserve_type_max_share)
 )
+
 switch_data.load_aug(
 filename=os.path.join(inputs_dir, 'spinning_reserve_params.dat'),
 optional=True,
 )
-
diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py
index 2d11e4b02..d3174b424 100644
--- a/switch_model/generators/core/build.py
+++ b/switch_model/generators/core/build.py
@@ -213,6 +213,9 @@ def define_components(mod):
 mod.VARIABLE_GENS = Set(
 initialize=mod.GENERATION_PROJECTS,
 filter=lambda m, g: m.gen_is_variable[g])
+ mod.VARIABLE_GENS_IN_ZONE = Set(
+ mod.LOAD_ZONES,
+ initialize=lambda m, z: [g for g in m.GENS_IN_ZONE[z] if m.gen_is_variable[g]])
 mod.BASELOAD_GENS = Set(
 initialize=mod.GENERATION_PROJECTS,
 filter=lambda m, g: m.gen_is_baseload[g])
diff --git a/switch_model/generators/core/commit/discrete.py b/switch_model/generators/core/commit/discrete.py
index 65ecdafbc..b22a97bad 100644
--- a/switch_model/generators/core/commit/discrete.py
+++ b/switch_model/generators/core/commit/discrete.py
@@ -21,7 +21,7 @@ def define_components(mod):
 Unless otherwise stated, all power capacity is specified in units of MW and
 all sets and parameters are mandatory.

- GEN_TPS_DISCRETE is a subset of GEN_TPS
+ DISCRETE_GEN_TPS is a subset of GEN_TPS
 that only includes projects that have gen_unit_size defined.
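+ For example (illustrative numbers), a project with gen_unit_size = 50
+ contributes one (g, t) pair to DISCRETE_GEN_TPS for each of its available
+ timepoints, and its commitment there is limited to whole multiples of
+ 50 MW (scaled by gen_availability, per Commit_Units_Consistency below).
+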
- CommitGenUnits[(g, bld_yr) in GEN_BLD_YRS_DISCRETE] is an
+ CommitGenUnits[(g, t) in DISCRETE_GEN_TPS] is an
@@ -48,15 +48,16 @@ def define_components(mod):
 """
- mod.GEN_TPS_DISCRETE = Set(
- initialize=mod.GEN_TPS,
- filter=lambda m, g, t: (
- g in m.DISCRETELY_SIZED_GENS))
+ mod.DISCRETE_GEN_TPS = Set(
+ dimen=2,
+ initialize=lambda m:
+ [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]]
+ )
 mod.CommitGenUnits = Var(
- mod.GEN_TPS_DISCRETE,
+ mod.DISCRETE_GEN_TPS,
 within=NonNegativeIntegers)
 mod.Commit_Units_Consistency = Constraint(
- mod.GEN_TPS_DISCRETE,
+ mod.DISCRETE_GEN_TPS,
 rule=lambda m, g, t: (
 m.CommitGen[g, t] ==
 m.CommitGenUnits[g, t] * m.gen_unit_size[g] *
 m.gen_availability[g]))
diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py
index a86af8e67..a4bd8aa94 100644
--- a/switch_model/hawaii/demand_response_simple.py
+++ b/switch_model/hawaii/demand_response_simple.py
@@ -5,6 +5,11 @@ def define_arguments(argparser):
 argparser.add_argument('--demand-response-share', type=float, default=0.30,
 help="Fraction of hourly load that can be shifted to other times of day (default=0.30)")
+ argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'],
+ help=
+ "Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). "
+ "Specify 'none' to disable."
+ )

 def define_components(m):
@@ -29,23 +34,63 @@ def define_components(m):
 # add demand response to the zonal energy balance
 m.Zone_Power_Withdrawals.append('ShiftDemand')

- # # calculate up and down reserves (from supply perspective, so "up" means less load)
- # # note: the bids are negative quantities, indicating _production_ of reserves;
- # # they contribute to the reserve requirement with opposite sign
- # m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS,
- # rule=lambda m, z, tp: m.DemandResponse[z, t] - m.DemandResponse[z, t].lb
- # )
- # m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS,
- # # assume loads could quickly be
- # rule=lambda m, z, tp: m.DemandResponse[z, t].ub - m.DemandResponse[z, t]
- # )
-
- # Register with spinning reserves if it is available
- if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
- m.HIDemandResponseSimpleSpinningReserveUp = Expression(
- m.BALANCING_AREA_TIMEPOINTS,
- rule=lambda m, b, t:
- sum(m.DemandResponse[z, t] - m.DemandResponse[z, t].lb
- for z in m.ZONES_IN_BALANCING_AREA[b])
- )
- m.Spinning_Reserve_Up_Provisions.append('HIDemandResponseSimpleSpinningReserveUp')
+ if [rt.lower() for rt in m.options.demand_response_reserve_types] != ['none']:
+ # Register with spinning reserves
+ if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+ # calculate available slack from demand response
+ # (from supply perspective, so "up" means less load)
+ m.DemandResponseSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t:
+ sum(
+ m.ShiftDemand[z, t] - m.ShiftDemand[z, t].lb
+ for z in m.ZONES_IN_BALANCING_AREA[b]
+ )
+ )
+ m.DemandResponseSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, tp:
+ sum(
+ # Assume shiftable load can only be raised by a factor of 8 (i.e., concentrate in 3 hours)
+ 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp]
+ for z in m.ZONES_IN_BALANCING_AREA[b]
+ )
+ )
+ if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+ # using advanced formulation, index by reserve type, balancing area, timepoint
+ # define variables for each type of reserves to be provided
+ # choose how to allocate the slack between the different
reserve products
+ m.DR_SPINNING_RESERVE_TYPES = Set(
+ initialize=m.options.demand_response_reserve_types
+ )
+ m.DemandResponseSpinningReserveUp = Var(
+ m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+ within=NonNegativeReals
+ )
+ m.DemandResponseSpinningReserveDown = Var(
+ m.DR_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+ within=NonNegativeReals
+ )
+ # constrain reserve provision within available slack
+ m.Limit_DemandResponseSpinningReserveUp = Constraint(
+ m.BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, ba, tp:
+ sum(
+ m.DemandResponseSpinningReserveUp[rt, ba, tp]
+ for rt in m.DR_SPINNING_RESERVE_TYPES
+ ) <= m.DemandResponseSlackUp[ba, tp]
+ )
+ m.Limit_DemandResponseSpinningReserveDown = Constraint(
+ m.BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, ba, tp:
+ sum(
+ m.DemandResponseSpinningReserveDown[rt, ba, tp]
+ for rt in m.DR_SPINNING_RESERVE_TYPES
+ ) <= m.DemandResponseSlackDown[ba, tp]
+ )
+ m.Spinning_Reserve_Up_Provisions.append('DemandResponseSpinningReserveUp')
+ m.Spinning_Reserve_Down_Provisions.append('DemandResponseSpinningReserveDown')
+ else:
+ # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
+ if m.options.demand_response_reserve_types != ['spinning']:
+ raise ValueError(
+ 'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
+ )
+ m.Spinning_Reserve_Up_Provisions.append('DemandResponseSlackUp')
+ m.Spinning_Reserve_Down_Provisions.append('DemandResponseSlackDown')
diff --git a/switch_model/hawaii/ev.py b/switch_model/hawaii/ev.py
index 0f3c53c2e..554bbc5c3 100644
--- a/switch_model/hawaii/ev.py
+++ b/switch_model/hawaii/ev.py
@@ -5,6 +5,11 @@ def define_arguments(argparser):
 argparser.add_argument("--ev-timing", choices=['bau', 'flat', 'optimal'], default='optimal',
 help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).")
+ argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'],
+ help=
+ "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation'). "
+ "Default is generic 'spinning'. Specify 'none' to disable. Only takes effect with '--ev-timing optimal'."
+ )

 def define_components(m):
 # set up various parameters describing the EV and ICE fleet each year
@@ -85,17 +90,48 @@ def define_components(m):
 # add the EV load to the model's energy balance
 m.Zone_Power_Withdrawals.append('ChargeEVs')

- # Register with spinning reserves if it is available and optimal EV
- # charging is enabled.
- if('Spinning_Reserve_Up_Provisions' in dir(m) and
- m.options.ev_timing == "optimal"):
- m.EVSpinningReserveUp = Expression(
- m.BALANCING_AREA_TIMEPOINTS,
- rule=lambda m, b, t:
- sum(m.ChargeEVs[z, t]
- for z in m.ZONES_IN_BALANCING_AREA[b])
- )
- m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+ # Register with spinning reserves if it is available and optimal EV charging is enabled.
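+ # Example invocation (hypothetical scenario, using the options defined
+ # above): "switch solve --ev-timing optimal --ev-reserve-types regulation"
+ # lets optimally timed EV charging supply regulation-type up reserves.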
+ if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal":
+ if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+ # calculate available slack from EV charging
+ # (from supply perspective, so "up" means less load)
+ m.EVSlackUp = Expression(
+ m.BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, b, t:
+ sum(m.ChargeEVs[z, t] for z in m.ZONES_IN_BALANCING_AREA[b])
+ )
+ # note: we currently ignore down-reserves (option of increasing consumption)
+ # from EVs since it's not clear how high they could go; we could revisit this if
+ # down-reserves have a positive price at equilibrium (probably won't)
+ if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+ # using advanced formulation, index by reserve type, balancing area, timepoint
+ # define variables for each type of reserves to be provided
+ # choose how to allocate the slack between the different reserve products
+ m.EV_SPINNING_RESERVE_TYPES = Set(
+ initialize=m.options.ev_reserve_types
+ )
+ m.EVSpinningReserveUp = Var(
+ m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+ within=NonNegativeReals
+ )
+ # constrain reserve provision within available slack
+ m.Limit_EVSpinningReserveUp = Constraint(
+ m.BALANCING_AREA_TIMEPOINTS,
+ rule=lambda m, ba, tp:
+ sum(
+ m.EVSpinningReserveUp[rt, ba, tp]
+ for rt in m.EV_SPINNING_RESERVE_TYPES
+ ) <= m.EVSlackUp[ba, tp]
+ )
+ m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+ else:
+ # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
+ if m.options.ev_reserve_types != ['spinning']:
+ raise ValueError(
+ 'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
+ )
+ m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
+

 def load_inputs(m, switch_data, inputs_dir):
diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py
index e103c4e27..b31aed4c3 100644
--- a/switch_model/hawaii/scenario_data.py
+++ b/switch_model/hawaii/scenario_data.py
@@ -444,6 +444,45 @@ def write_tables(**args):
 ORDER BY 1, 2;
 """, args)

+ #########################
+ # spinning_reserves_advanced
+ # args['max_reserve_capability'] is a list of tuples of (technology, reserve_type)
+ # (assumed equivalent to 'regulation' if not specified)
+ # We unzip it to use with the unnest function (psycopg2 passes lists of tuples
+ # as arrays of tuples, and unnest would keep those as tuples)
+ try:
+ reserve_technologies, reserve_types = map(list, zip(*args['max_reserve_capability']))
+ except KeyError:
+ reserve_technologies, reserve_types = [], []
+ res_args = args.copy()
+ res_args['reserve_technologies']=reserve_technologies
+ res_args['reserve_types']=reserve_types
+
+ write_table('generation_projects_reserve_capability.tab', """
+ WITH reserve_capability (technology, reserve_type) as (
+ SELECT
+ UNNEST(%(reserve_technologies)s) AS technology,
+ UNNEST(%(reserve_types)s) AS reserve_type
+ ),
+ reserve_types (rank, reserve_type) as (
+ VALUES
+ (0, 'none'),
+ (1, 'contingency'),
+ (2, 'regulation')
+ )
+ SELECT
+ p."GENERATION_PROJECT",
+ t2.reserve_type AS "SPINNING_RESERVE_TYPE"
+ FROM
+ study_projects p
+ LEFT JOIN reserve_capability c USING (technology)
+ LEFT JOIN reserve_types t1 USING (reserve_type)
+ JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100)
+ WHERE t2.rank > 0
+ ORDER BY 1, t2.rank;
+ """, res_args)
+
+
 #########################
 # operation.unitcommit.fuel_use

diff --git a/switch_model/utilities.py b/switch_model/utilities.py
index
d7e1f659a..aeaa41792 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -601,3 +601,12 @@ def flush(self): self.terminal.flush() self.log_file.flush() + +def iteritems(obj): + """ Iterator of key, value pairs for obj; + equivalent to obj.items() on Python 3+ and obj.iteritems() on Python 2 """ + try: + return obj.iteritems() + except AttributeError: # Python 3+ + return obj.items() + From 75808f0c273bfbdd47997cb80668e57766cd9a1b Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 15 Feb 2018 09:51:16 -1000 Subject: [PATCH 14/51] improve reporting on infeasible models --- switch_model/solve.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 235221030..107b16c63 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -574,11 +574,12 @@ def solve(model): return results elif (results.solver.termination_condition == TerminationCondition.infeasible): if hasattr(model, "iis"): - print "Model was infeasible; irreducible infeasible set (IIS) returned by solver:" + print "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" print "\n".join(c.name for c in model.iis) else: - print "Model was infeasible; if the solver can generate an irreducible infeasible set," - print "more information may be available by calling this script with --suffixes iis ..." + print "Model was infeasible; if the solver can generate an irreducibly inconsistent set (IIS)," + print "more information may be available by setting the appropriate flags in the " + print 'solver_options_string and calling this script with "--suffixes iis".' raise RuntimeError("Infeasible model") else: print "Solver terminated abnormally." From 7e353dd904c1f3e54bd689d0d5885c9c6c8252ee Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 15 Feb 2018 09:52:48 -1000 Subject: [PATCH 15/51] miscellaneous improvements in Hawaii package --- switch_model/hawaii/rps.py | 111 ++++++++++++++++++++++++---- switch_model/hawaii/save_results.py | 3 +- switch_model/hawaii/switch_patch.py | 19 ++++- 3 files changed, 115 insertions(+), 18 deletions(-) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index e06562e6d..9e0d87d9f 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -7,12 +7,14 @@ def define_arguments(argparser): argparser.add_argument('--biofuel-limit', type=float, default=1.0, help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)") + argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, + help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps") argparser.add_argument('--rps-activate', default='activate', dest='rps_level', action='store_const', const='activate', help="Activate RPS (on by default).") argparser.add_argument('--rps-deactivate', dest='rps_level', action='store_const', const='deactivate', - help="Dectivate RPS.") + help="Deactivate RPS.") argparser.add_argument('--rps-no-renewables', dest='rps_level', action='store_const', const='no_renewables', help="Deactivate RPS and don't allow any new renewables.") @@ -24,6 +26,7 @@ def define_arguments(argparser): 'full_load_heat_rate', 'split_commit', 'relaxed_split_commit', + 'fuel_switch_at_high_rps', ], help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " + "with unit commitment, full_load_heat_rate for models without." 
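(Usage sketch for the options added above, assuming the standard "switch solve"
command line used elsewhere in the examples:

    switch solve --rps-activate --rps-allocation fuel_switch_at_high_rps \
        --biofuel-switch-threshold 0.8

This would force all thermal plants onto biofuel, and count their output toward
the RPS, in any period whose RPS target is at least 0.8.)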
@@ -40,7 +43,8 @@ def define_components(m): m.f_rps_eligible = Param(m.FUELS, within=Binary) m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: - list(m.NON_FUEL_ENERGY_SOURCES) + [f for f in m.FUELS if m.f_rps_eligible[f]]) + [s for s in m.NON_FUEL_ENERGY_SOURCES if s != 'Battery'] + [f for f in m.FUELS if m.f_rps_eligible[f]] + ) m.RPS_YEARS = Set(ordered=True) m.rps_target = Param(m.RPS_YEARS) @@ -63,10 +67,9 @@ def rps_target_for_period_rule(m, p): # calculate amount of power produced from renewable fuels during each period m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per: sum( - m.DispatchGenRenewableMW[p, tp] * m.tp_weight[tp] - for p in m.FUEL_BASED_GENS - if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp] + for g in m.FUEL_BASED_GENS + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) @@ -81,20 +84,20 @@ def rps_target_for_period_rule(m, p): m.RPSFuelPower[per] + sum( - m.DispatchGen[p, tp] * m.tp_weight[tp] - for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[f] - if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGen[g, tp] * m.tp_weight[tp] + for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES + for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[f] + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) # total power production each period (against which RPS is measured) + # note: we exclude production from storage m.RPSTotalPower = Expression(m.PERIODS, rule=lambda m, per: sum( - m.DispatchGen[p, tp] * m.tp_weight[tp] - for p in m.GENERATION_PROJECTS if (p, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for tp in m.TPS_IN_PERIOD[per] + m.DispatchGen[g, tp] * m.tp_weight[tp] + for g in m.GENERATION_PROJECTS if g not in getattr(m, 'STORAGE_GENS', []) + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) @@ -169,7 +172,8 @@ def define_DispatchGenRenewableMW(m): split_commit_DispatchGenRenewableMW(m) elif m.options.rps_allocation == 'relaxed_split_commit': relaxed_split_commit_DispatchGenRenewableMW(m) - + elif m.options.rps_allocation == 'fuel_switch_at_high_rps': + fuel_switch_at_high_rps_DispatchGenRenewableMW(m) def simple_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. @@ -309,7 +313,21 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): * m.gen_min_load_fraction_TP[g, tp] ) + # rule=lambda m, g, t, intercept, incremental_heat_rate: ( + # sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) >= + # # Do the startup + # m.StartupGenCapacity[g, t] * m.gen_startup_fuel[g] / m.tp_duration_hrs[t] + + # intercept * m.CommitGen[g, t] + + # incremental_heat_rate * m.DispatchGen[g, t])) + + # TODO: fix bug in this code that forces renewable dispatch=total committed when + # using 100% RPS (this makes it hard to get reserves and makes it impossible to + # use the AES plant when using discrete commitment, because the PSIP module limits + # output to 180 MW but the plant is rated 185 MW.) + # use standard heat rate calculations for renewable and non-renewable parts + # These set a lower bound for each type of fuel, as if we committed one slice of capacity + # for renewables and one slice for non-renewable, equal to the amount of power from each. 
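+ # Illustrative reading (hypothetical numbers): if DispatchGen = 100 MW and
+ # DispatchGenRenewableMW = 30 MW, the renewable slice must use at least
+ # intercept * 30 + incremental_heat_rate * 30 of renewable fuel, and the
+ # non-renewable slice at least intercept * 70 + incremental_heat_rate * 70
+ # of fossil fuel (plus their shares of any startup fuel).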
m.ProjRenewableFuelUseRate_Calculate = Constraint(
 m.GEN_TPS_FUEL_PIECEWISE_CONS_SET,
 rule=lambda m, g, tp, intercept, incremental_heat_rate:
@@ -333,7 +351,7 @@ def relaxed_split_commit_DispatchGenRenewableMW(m):
 )
 >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp])
 * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp]
- + intercept * (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp])
+ + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp])
 + incremental_heat_rate * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp])
 )
@@ -355,7 +373,59 @@ def relaxed_split_commit_DispatchGenRenewableMW(m):
 m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS,
 rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0
 )
+
+ # only count biofuels toward RPS
+ # prevent use of non-renewable fuels during renewable timepoints
+ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f):
+ if m.f_rps_eligible[f]:
+ return Constraint.Skip
+ else:
+ # harder to read like this, but having all numerical values on the right hand side
+ # facilitates analysis of duals and reduced costs
+ # note: we also add a little slack to avoid having this be the main constraint
+ # on total output from any power plant (that also clarifies dual analysis)
+ big_fuel = 1.01 * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g]
+ return (
+ m.GenFuelUseRate[g, tp, f]
+ + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel
+ <=
+ big_fuel
+ )
+ m.Enforce_DispatchRenewableFlag = Constraint(
+ m.GEN_TP_FUELS, rule=Enforce_DispatchRenewableFlag_rule
+ )

+def fuel_switch_at_high_rps_DispatchGenRenewableMW(m):
+ """ Switch all plants to biofuel (and count toward RPS) if and only if the period's RPS target is at or above the biofuel switch threshold """
+
+ if m.options.rps_level == 'activate':
+ # find all dispatch points for non-renewable fuels during periods with
+ # RPS targets at or above the biofuel switch threshold
+ m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set(
+ dimen=3,
+ initialize=lambda m: [
+ (g, tp, f)
+ for p in m.PERIODS if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold
+ for g in m.FUEL_BASED_GENS if (g, p) in m.GEN_PERIODS
+ for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f]
+ for tp in m.TPS_IN_PERIOD[p]
+ ]
+ )
+ m.No_Fossil_Fuel_With_High_RPS = Constraint(
+ m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS,
+ rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0
+ )
+ # count full dispatch toward RPS during non-fossil periods, otherwise give no credit
+ def rule(m, g, tp):
+ if m.rps_target_for_period[m.tp_period[tp]] >= m.options.biofuel_switch_threshold:
+ return m.DispatchGen[g, tp]
+ else:
+ return 0.0
+ m.DispatchGenRenewableMW = Expression(m._FUEL_BASED_GEN_TPS, rule=rule)
+ else:
+ m.DispatchGenRenewableMW = Expression(
+ m._FUEL_BASED_GEN_TPS,
+ rule=lambda m, g, tp: 0.0
+ )

 def binary_by_period_DispatchGenRenewableMW(m):
 # NOTE: this could be extended to handle fuel blends (e.g., 50% biomass/50% coal)
@@ -378,6 +448,15 @@ def binary_by_period_DispatchGenRenewableMW(m):
 m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_PERIODS, within=Binary)

 # force flag on or off when the RPS is simple (to speed computation)
+ def rule(m, g, pe):
+ if m.rps_target_for_period[pe] == 1.0:
+ # 100% RPS; use only renewable fuels
+ return (m.DispatchRenewableFlag[g, pe] == 1)
+ elif m.rps_target_for_period[pe] == 0.0 or m.options.rps_level != 'activate':
+ # no RPS, don't bother counting renewable fuels
+ return (m.DispatchRenewableFlag[g, pe] == 0)
+ else:
+ return Constraint.Skip
 m.Force_DispatchRenewableFlag = Constraint(
m.GEN_WITH_FUEL_ACTIVE_PERIODS, rule=lambda m, g, pe: diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 2b8f67b01..6f598298a 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -208,8 +208,9 @@ def gen_energy_source(g): g for pe in m.PERIODS for g in m.GENERATION_PROJECTS if value(m.GenCapacity[g, pe]) > 0.001 ))) active_periods_for_gen = defaultdict(set) + used_cap = getattr(m, 'CommitGen', m.DispatchGen) # use CommitGen if available, otherwise DispatchGen for (g, tp) in m.GEN_TPS: - if value(m.DispatchGen[g, tp]) > 0.001: + if value(used_cap[g, tp]) > 0.001: active_periods_for_gen[g].add(m.tp_period[tp]) # add the periods between the first and last active period if capacity was available then operate_gen_in_period = set() diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 87a084590..b04c516ed 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,6 +1,23 @@ from pyomo.environ import * import switch_model.utilities as utilities -from util import get + +# patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver +# (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line) +def new_create_command_line(*args, **kwargs): + # call original command + command = old_create_command_line(*args, **kwargs) + # alter script + if hasattr(command, 'script') and 'optimize\n' in command.script: + command.script = command.script.replace( + 'optimize\n', + 'optimize\nchange problem fix\noptimize\n' + # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 + # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 + ) + return command +from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL +old_create_command_line = CPLEXSHELL.create_command_line +CPLEXSHELL.create_command_line = new_create_command_line def define_components(m): """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" From c66bec69cc0dbb7fffbc2bc696e6523eda3340a2 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 15 Feb 2018 10:39:13 -1000 Subject: [PATCH 16/51] bug fixes in iterative demand_response.iterative --- .../demand_response/iterative/__init__.py | 32 ++++++------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index e68772e8c..db7ee6948 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -25,32 +25,12 @@ import pyomo.repn.canonical_repn import switch_model.utilities as utilities -from save_results import DispatchGenByFuel +# TODO: move part of the reporting back into Hawaii module and eliminate these dependencies +from switch_model.hawaii.save_results import DispatchGenByFuel +import switch_model.hawaii.util as util demand_module = None # will be set via command-line options -import util -from util import get - -# patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver -# (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line) -def new_create_command_line(*args, **kwargs): - # call original command - command = old_create_command_line(*args, **kwargs) - # alter script - if hasattr(command, 'script') and 'optimize\n' in command.script: - command.script 
= command.script.replace( - 'optimize\n', - 'optimize\nchange problem fix\noptimize\n' - # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 - # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 - ) - return command -from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL -old_create_command_line = CPLEXSHELL.create_command_line -CPLEXSHELL.create_command_line = new_create_command_line - - def define_arguments(argparser): argparser.add_argument("--dr-flat-pricing", action='store_true', default=False, help="Charge a constant (average) price for electricity, rather than varying hour by hour") @@ -967,6 +947,12 @@ def summary_values(m): return values +def get(component, idx, default): + try: + return component[idx] + except KeyError: + return default + def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) From 9bf5bac4788fff7f62d46bb6007d1d1f0001cb55 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 15 Feb 2018 11:26:53 -1000 Subject: [PATCH 17/51] Upgrade input directories that were created for intermediate versions of Switch (e.g., 2.0.0b3) --- switch_model/upgrade/manager.py | 5 +++-- switch_model/upgrade/upgrade_2_0_0b4.py | 4 +--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py index c9613c9d1..8af391d31 100644 --- a/switch_model/upgrade/manager.py +++ b/switch_model/upgrade/manager.py @@ -78,7 +78,6 @@ def _write_input_version(inputs_dir, new_version): version_path = os.path.join(inputs_dir, version_file) with open(version_path, 'w') as f: f.write(new_version + "\n") - def do_inputs_need_upgrade(inputs_dir): """ @@ -120,7 +119,9 @@ def upgrade_inputs(inputs_dir, backup=True): # Successively apply the upgrade scripts as needed. for (upgrader, v_from, v_to) in upgrade_plugins: inputs_v = StrictVersion(get_input_version(inputs_dir)) - if inputs_v == StrictVersion(v_from): + # note: the next line catches datasets created by/for versions of Switch that + # didn't require input directory upgrades + if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): print_verbose('\tUpgrading from ' + v_from + ' to ' + v_to) upgrader.upgrade_input_dir(inputs_dir) print_verbose('\tFinished upgrading ' + inputs_dir + '\n') diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py index f65102fbe..7f44677be 100644 --- a/switch_model/upgrade/upgrade_2_0_0b4.py +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -39,9 +39,7 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): } for fname, old_new_pairs in old_new_column_names_in_file.iteritems(): - for old_new_pair in old_new_pairs: - old = old_new_pair[0] - new = old_new_pair[1] + for old, new in old_new_pairs: rename_column(fname, old_col_name=old, new_col_name=new) # Write a new version text file. From 9e7171e8cee36943c34d71dec883289af19b5332 Mon Sep 17 00:00:00 2001 From: "josiah.johnston" Date: Fri, 16 Feb 2018 17:06:09 -0800 Subject: [PATCH 18/51] Added basic example to spinning_reserves_advanced. It isn't equivalent to the spinning_reserves example, which makes me think it has a bug. Updated documentation in spinning_reserves_advanced Added destinations for CLI arguments to make the file easier to grep. Fixed bugs in project-level contingencies & NREL 3+5 rule Updated default GEN_SPINNING_RESERVE_TYPES to make code match documentation, and allow generation_projects_reserve_capability.tab to be optional. 
Fixed a minor indexing bug in hawaii/hydrogen.py --- .../spinning_reserves_advanced/README.md | 4 + .../inputs/financials.dat | 3 + .../inputs/fuel_cost.tab | 2 + .../inputs/fuels.tab | 2 + .../inputs/gen_build_costs.tab | 6 + .../inputs/gen_build_predetermined.tab | 6 + .../inputs/gen_inc_heat_rates.tab | 5 + .../inputs/generation_projects_info.tab | 5 + ...generation_projects_reserve_capability.tab | 3 + .../inputs/load_zones.tab | 2 + .../inputs/loads.tab | 5 + .../inputs/modules.txt | 17 ++ .../inputs/non_fuel_energy_sources.tab | 3 + .../inputs/periods.tab | 2 + .../inputs/spinning_reserve_params.dat | 1 + .../inputs/switch_inputs_version.txt | 1 + .../inputs/timepoints.tab | 5 + .../inputs/timeseries.tab | 2 + .../inputs/variable_capacity_factors.tab | 5 + .../inputs/zone_coincident_peak_demand.tab | 2 + .../spinning_reserves_advanced/options.txt | 2 + .../outputs/total_cost.txt | 1 + .../spinning_reserves_advanced.py | 168 +++++------------- switch_model/hawaii/hydrogen.py | 2 +- 24 files changed, 128 insertions(+), 126 deletions(-) create mode 100644 examples/production_cost_models/spinning_reserves_advanced/README.md create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab create mode 100644 examples/production_cost_models/spinning_reserves_advanced/options.txt create mode 100644 examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt diff --git a/examples/production_cost_models/spinning_reserves_advanced/README.md b/examples/production_cost_models/spinning_reserves_advanced/README.md new file mode 100644 index 
000000000..872d49934 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/README.md @@ -0,0 +1,4 @@ +SYNOPSIS + switch solve --verbose --log-run + +This example extends unit_commit by adding spinning reserve requirements. diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat b/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat new file mode 100644 index 000000000..5260b0024 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/financials.dat @@ -0,0 +1,3 @@ +param base_financial_year := 2015; +param interest_rate := .07; +param discount_rate := .05; diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab new file mode 100644 index 000000000..7ecb71f16 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuel_cost.tab @@ -0,0 +1,2 @@ +load_zone fuel period fuel_cost +South NaturalGas 2010 4 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab new file mode 100644 index 000000000..efbfb672e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/fuels.tab @@ -0,0 +1,2 @@ +fuel co2_intensity upstream_co2_intensity +NaturalGas 0.05306 0 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab new file mode 100644 index 000000000..72b4a3f46 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_costs.tab @@ -0,0 +1,6 @@ +GENERATION_PROJECT build_year gen_overnight_cost gen_fixed_om +S-Geothermal 1998 5524200 0 +S-NG_CC 2000 1143900 5868.3 +S-NG_GT 1990 605430 4891.8 +S-NG_GT 2002 605430 4891.8 +S-Central_PV-1 2001 2334300 41850 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab new file mode 100644 index 000000000..ceac9f2fc --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_build_predetermined.tab @@ -0,0 +1,6 @@ +GENERATION_PROJECT build_year gen_predetermined_cap +S-Geothermal 1998 2.0 +S-NG_CC 2000 7.0 +S-NG_GT 1990 3.0 +S-NG_GT 2002 4.0 +S-Central_PV-1 2001 3 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab new file mode 100644 index 000000000..a44733ea9 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/gen_inc_heat_rates.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT power_start_mw power_end_mw incremental_heat_rate_mbtu_per_mwhr fuel_use_rate_mmbtu_per_h +S-NG_CC 40 . . 269.4069 +S-NG_CC 40 100.0 6.684885 . +S-NG_GT 0 . . 0.1039 +S-NG_GT 0 1.0 10.2861 . 
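Aside: as a reading aid for the table above, gen_inc_heat_rates.tab defines a piecewise-linear fuel-use curve for each generator: one row anchors total fuel use (MMBtu/h) at minimum load (power_start_mw), and each additional row gives an incremental heat rate (MMBtu/MWh) over an output range. A minimal sketch of evaluating such a curve (hypothetical helper for illustration only; the actual constraints are built inside the model by switch_model.generators.core.commit.fuel_use):

def fuel_use_mmbtu_per_h(min_load_mw, fuel_use_at_min_load, segments, output_mw):
    # segments: list of (start_mw, end_mw, incremental_heat_rate_mmbtu_per_mwh);
    # assumes output_mw is between min load and the last segment's end
    fuel = fuel_use_at_min_load
    for start, end, ihr in segments:
        if output_mw > start:
            fuel += (min(output_mw, end) - start) * ihr
    return fuel

# S-NG_CC from the table above, dispatched at 70 MW:
# 269.4069 + (70 - 40) * 6.684885 = 469.95 MMBtu/h
print(fuel_use_mmbtu_per_h(40.0, 269.4069, [(40.0, 100.0, 6.684885)], 70.0))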
diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab new file mode 100644 index 000000000..9ec3bb4dc --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_info.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT gen_dbid gen_tech gen_load_zone gen_connect_cost_per_mw gen_capacity_limit_mw gen_max_age gen_min_build_capacity gen_scheduled_outage_rate gen_forced_outage_rate gen_is_variable gen_is_baseload gen_is_cogen gen_variable_om gen_energy_source gen_full_load_heat_rate gen_unit_size gen_min_load_fraction gen_startup_fuel gen_startup_om gen_min_downtime gen_can_provide_spinning_reserves +S-Geothermal 33 Geothermal South 134222 3 30 0 0.0075 0.0241 0 1 0 28.83 Geothermal . . . . . . 0 +S-NG_CC 34 NG_CC South 57566.6 . 20 0 0.04 0.06 0 0 0 3.4131 NaturalGas 6.705 1 0.4 9.16 10.3 12 1 +S-NG_GT 36 NG_GT South 57566.6 . 20 0 0.04 0.06 0 0 0 27.807 NaturalGas 10.39 . 0 0.22 0.86 . 1 +S-Central_PV-1 41 Central_PV South 74881.9 4 20 0 0 0.02 1 0 0 0 Solar . . 0 . 0 . 0 \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab new file mode 100644 index 000000000..3f2ff51ab --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/generation_projects_reserve_capability.tab @@ -0,0 +1,3 @@ +GENERATION_PROJECT SPINNING_RESERVE_TYPES +S-NG_CC spinning +S-NG_GT spinning \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab new file mode 100644 index 000000000..2bda9cb2b --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/load_zones.tab @@ -0,0 +1,2 @@ +LOAD_ZONE existing_local_td local_td_annual_cost_per_mw +South 10 128040 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab new file mode 100644 index 000000000..6043e8557 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/loads.tab @@ -0,0 +1,5 @@ +LOAD_ZONE TIMEPOINT zone_demand_mw +South 1 3 +South 2 8 +South 3 10 +South 4 7 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt new file mode 100644 index 000000000..a27c2500e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/modules.txt @@ -0,0 +1,17 @@ +# Core Modules +switch_model +switch_model.timescales +switch_model.financials +switch_model.balancing.load_zones +switch_model.energy_sources.properties +switch_model.generators.core.build +switch_model.generators.core.dispatch +switch_model.reporting +# Custom Modules +switch_model.transmission.local_td +switch_model.generators.core.commit.operate +switch_model.generators.core.commit.fuel_use +switch_model.energy_sources.fuel_costs.simple +switch_model.balancing.operating_reserves.areas +switch_model.balancing.operating_reserves.spinning_reserves_advanced +#switch_model.reporting.dump diff --git 
a/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab new file mode 100644 index 000000000..84ffbd347 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/non_fuel_energy_sources.tab @@ -0,0 +1,3 @@ +energy_source +Geothermal +Solar \ No newline at end of file diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab new file mode 100644 index 000000000..ed32ef2af --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/periods.tab @@ -0,0 +1,2 @@ +INVESTMENT_PERIOD period_start period_end +2010 2008 2012 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat b/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat new file mode 100644 index 000000000..01558ea51 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/spinning_reserve_params.dat @@ -0,0 +1 @@ +param contingency_safety_factor := 1; diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt new file mode 100644 index 000000000..94789474f --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt @@ -0,0 +1 @@ +2.0.0b4 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab new file mode 100644 index 000000000..9863add2e --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/timepoints.tab @@ -0,0 +1,5 @@ +timepoint_id timestamp timeseries +1 2010011500 2010_all +2 2010011506 2010_all +3 2010011512 2010_all +4 2010011518 2010_all diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab new file mode 100644 index 000000000..84cc623a6 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/timeseries.tab @@ -0,0 +1,2 @@ +TIMESERIES ts_period ts_duration_of_tp ts_num_tps ts_scale_to_period +2010_all 2010 6 1 1826.25 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab new file mode 100644 index 000000000..dd2a630c2 --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/variable_capacity_factors.tab @@ -0,0 +1,5 @@ +GENERATION_PROJECT timepoint gen_max_capacity_factor +S-Central_PV-1 1 0.0 +S-Central_PV-1 2 0.61 +S-Central_PV-1 3 1 +S-Central_PV-1 4 0.4 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab b/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab new file mode 100644 index 000000000..cf4097b3d --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/zone_coincident_peak_demand.tab @@ -0,0 +1,2 @@ +LOAD_ZONE PERIOD zone_expected_coincident_peak_demand +South 2010 10 diff --git 
a/examples/production_cost_models/spinning_reserves_advanced/options.txt b/examples/production_cost_models/spinning_reserves_advanced/options.txt new file mode 100644 index 000000000..202ef2d6d --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/options.txt @@ -0,0 +1,2 @@ +--spinning-requirement-rule 3+5 +--unit-contingency diff --git a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt new file mode 100644 index 000000000..c7c8c9c6b --- /dev/null +++ b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt @@ -0,0 +1 @@ +28580571.2082 diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index e84671687..d5af07934 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -1,90 +1,8 @@ # Copyright (c) 2015-2017 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ -A simple and flexible model of spinning reserves that tracks the state of unit -commitment and dispatched capacity to ensures that the generation fleet has -enough up- and down- ramping capacity to satisfy reserve requirements. The -unit commitment module is a prerequisite for spinning reserves. This -formulation does not consider ramping speed or duration requirements, just MW -of upward and downward ramping capability. - -Spinning reserve requirements can be customized through use of configuration -parameters and can include n-1 contingencies (either from generation units or -entire generation plants), as well as variability of load and variable -renewable resources. This lumps together regulating reserves, load following -reserves, and contingency reserves without distinguishing their timescales or -required response duration. Operating reserves at timescales with slower -responses for load following or longer-term recovery from contingencies are not -included here. - -Most regions and countries use distinct terminology for reserves products and -distinct procedures for determining reserve requirements. This module provides -a simple approach to spinning reserve requirements, which can be extended by -other module via registering with dynamic lists. Detailed regional studies may -need to write their own reserve modules to reflect specific regional reserve -definitions and policies. - -Notes: - -This formulation only considers ramping capacity (MW), not duration or speed. -The lack of duration requirements could cause problems if a significant amount -of capacity is energy limited such as demand response, storage, or hydro. -California now has a duration requirement of 3 hours for some classes of -operating reserves. The lack of ramping speed could cause issues if the -generators that are earmarked for providing spinning reserves have significant -differences in ramping speeds that are important to account for. This -formulation could be extended in the future to break reserve products into -different categories based on overall response time (ramping speed & -telemetry), and specify different reserve requirements for various response -times: <1sec, <1 min, <5min, <15min, <1hr, 1day. 
- -One standard (nonlinear) methodology for calculating reserve requirements -looks something like: k * sqrt(sigma_load^2 + sigma_renewable^2), where k is a -constant reflecting capacity requirements (typically in the range of 3-5), and -sigma's denote standard deviation in units of MW. Depending on the study, -sigma may be calculated on timescales of seconds to minutes. Several studies -estimate the sigmas with linear approximations. Some studies set -sigma_renewable as a function of renewable output, especially for wind where -power output shows the highest variability in the 40-60% output range because -that is the steepest section of its power production curve. This formulation -is not used here because the signma_renewable term would need to be -approximated using renewable power output, making this equation non-linear -with respect to dispatch decision variables. - -Other studies have used linear equations for estimating reserve requirements: - -The Western Wind and Solar Integration study suggested a heuristic of 3% * -load + 5% * renewable_output for spinning reserve capacity requirements, and -the same amount for quick start capacity requirements. - -Halamay 2011 derives spinning reserve requirements of +2.1% / -2.8% of load -and ~ +2% / -3% for renewables to balance natural variability, and derives -non-spinning reserve requirements and +3.5% / -4.0% of load and ~ +/- 4% for -renewables to balance hour-ahead forecast errors. - -Note: Most research appears to be headed towards dynamic and probabilistic -techniques, rather than the static approximations used here. - -References on operating reserves follow. - -Ela, Erik, et al. "Evolution of operating reserve determination in wind power -integration studies." Power and Energy Society General Meeting, 2010 IEEE. -http://www.nrel.gov/docs/fy11osti/49100.pdf - -Milligan, Michael, et al. "Operating reserves and wind power integration: An -international comparison." proc. 9th International Workshop on large-scale -integration of wind power into power systems. 2010. -http://www.nrel.gov/docs/fy11osti/49019.pdf - -Halamay, Douglas A., et al. "Reserve requirement impacts of large-scale -integration of wind, solar, and ocean wave power generation." IEEE -Transactions on Sustainable Energy 2.3 (2011): 321-328. -http://nnmrec.oregonstate.edu/sites/nnmrec.oregonstate.edu/files/PES_GM_2010_HalamayVariability_y09m11d30h13m26_DAH.pdf - -Ibanez, Eduardo, Ibrahim Krad, and Erik Ela. "A systematic comparison of -operating reserve methodologies." PES General Meeting| Conference & -Exposition, 2014 IEEE. http://www.nrel.gov/docs/fy14osti/61016.pdf - +This is an advanced version of the basic spinning_reserves reserves module, and +can be used in place of it (not in addition to). """ import os from collections import defaultdict @@ -92,7 +10,6 @@ from switch_model.utilities import iteritems - dependencies = ( 'switch_model.timescales', 'switch_model.balancing.load_zones', @@ -128,7 +45,7 @@ def define_arguments(argparser): ) # TODO: define these inputs in data files group.add_argument( - '--contingency-reserve-type', + '--contingency-reserve-type', dest='contingency_reserve_type', default='spinning', help= "Type of reserves to use to meet the contingency reserve requirements " @@ -136,7 +53,7 @@ def define_arguments(argparser): "(e.g., 'contingency' or 'spinning'); default is 'spinning'." 
) group.add_argument( - '--regulating-reserve-type', + '--regulating-reserve-type', dest='regulating_reserve_type', default='spinning', help= "Type of reserves to use to meet the regulating reserve requirements " @@ -151,23 +68,27 @@ def define_dynamic_lists(m): """ Spinning_Reserve_Requirements and Spinning_Reserve_Provisions are dicts of lists of components that contribute to the requirement or provision - of operating reserves. Entries in each dict are indexed by reserve - type (usually "regulation" or "contingency") and direction ("up" or "down"). + of spinning reserves. Entries in each dict are indexed by reserve + product. In the simple scenario, you may only have a single product called + 'spinning'. In other scenarios where some generators are limited in what + kind of reserves they can provide, you may have "regulation" and + "contingency" reserve products. The dicts are setup as defaultdicts, so they will automatically return an empty list if nothing has been added for a particular type of reserves. - Spinning_Reserve_Requirements contains lists of model components that - increase operating reserve requirements in each balancing area and timepoint. + Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements + list model components that increase reserve requirements in each balancing + area and timepoint. - Spinning_Reserve_Provisions contains lists of model components that - help satisfy operating reserve requirements in each balancing area and - timepoint. + Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions list + model components that help satisfy spinning reserve requirements in + each balancing area and timepoint. - Spinning_Reserve_Contingencies is a list of model components - describing maximum contingency events. Elements of this list are - summarized into a MaximumContingency variable that is added to the - Spinning_Reserve_Requirements['contingency', 'up'] list. + Spinning_Reserve_Up_Contingencies and Spinning_Reserve_Down_Contingencies + list model components describing maximum contingency events. Elements of + this list are summarized into a MaximumContingency variable that is added + to the Spinning_Reserve_Requirements['contingency'] list. Each component in the Requirements and Provisions lists needs to use units of MW and be indexed by reserve type, balancing area and timepoint. Missing @@ -283,9 +204,12 @@ def gen_project_contingency(m): doc="Largest generating project that could drop offline.") def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] - if m.gen_can_provide_spinning_reserves[g]: + if g in m.SPINNING_RESERVE_CAPABLE_GENS: + total_up_reserves = sum( + m.CommitGenSpinningReservesUp[rt, g, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) return m.GenProjectLargestContingency[b, t] >= \ - m.DispatchGen[g, t] + m.CommitGenSpinningReservesUp[g, t] + m.DispatchGen[g, t] + total_up_reserves else: return m.GenProjectLargestContingency[b, t] >= m.DispatchGen[g, t] m.Enforce_GenProjectLargestContingency = Constraint( @@ -297,15 +221,13 @@ def Enforce_GenProjectLargestContingency_rule(m, g, t): m.Spinning_Reserve_Up_Contingencies.append('GenProjectLargestContingency') def hawaii_spinning_reserve_requirements(m): - # This may be more appropriate for a hawaii submodule until it is - # better documented and referenced. 
- # these parameters were found by regressing the reserve requirements from + # These parameters were found by regressing the reserve requirements from # the GE RPS Study against wind and solar conditions each hour (see # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/source_data/ # reserve_requirements_oahu_scenarios charts.xlsx and # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/ # fit_renewable_reserves.ipynb ) - # TODO: supply all the parameters for this function in input files + # TODO: supply all the parameters for this function in input files. # Calculate and register regulating reserve requirements # (currently only considers variable generation, only underforecasting) @@ -373,22 +295,19 @@ def nrel_3_5_spinning_reserve_requirements(m): be set to WithdrawFromCentralGrid. Otherwise load will be set to zone_demand_mw. """ - def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): try: load = m.WithdrawFromCentralGrid except AttributeError: load = m.zone_demand_mw return ( - 0.03 * sum(load[z, t] for z in m.LOAD_ZONES_IN_BALANCING_AREA[b]) - + - 0.05 * sum( - m.DispatchGen[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b] - for g in m.VARIABLE_GENS_IN_ZONE[z] - if (g, t) in m.VARIABLE_GEN_TPS - ) - ) + 0.03 * sum(load[z, t] + for z in m.LOAD_ZONES + if b == m.zone_balancing_area[z]) + + 0.05 * sum(m.DispatchGen[g, t] + for g in m.VARIABLE_GENS + if (g, t) in m.VARIABLE_GEN_TPS and + b == m.zone_balancing_area[m.gen_load_zone[g]])) m.NREL35VarGenSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], m.BALANCING_AREA_TIMEPOINTS, @@ -401,8 +320,7 @@ def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): def define_components(m): """ contingency_safety_factor is a parameter that increases the contingency - requirements. By default this is set to 2.0 to prevent the largest - generator from providing reserves for itself. + requirements. This defaults to 1.0. GEN_SPINNING_RESERVE_TYPES is a set of all allowed reserve types for each generation project. This is read from generation_projects_reserve_capability.tab. @@ -412,11 +330,9 @@ def define_components(m): capacity that can be used to provide each type of reserves. It is indexed by GEN_SPINNING_RESERVE_TYPES. This is read from generation_projects_reserve_capability.tab and defaults to 1 if not specified. - - SPINNING_RESERVE_CAPABLE_GEN_TPS is a subset of GEN_TPS of generators that can - provide spinning reserves based on gen_can_provide_spinning_reserves. + provide spinning reserves based on generation_projects_reserve_capability.tab. CommitGenSpinningReservesUp[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] is a decision variable of how much upward spinning reserve capacity to commit @@ -450,11 +366,6 @@ def define_components(m): default=1.0 ) - # TODO: go back to calling all of these spinning reserves instead of operating reserves, - # since they all have to be backed by spinning (committed) capacity for now. Also prefix - # "reserve" with "spinning" everywhere, to distinguish from planning reserves. Then the - # terminology will be pretty similar to the current spinning_reserves module. - # reserve types that are supplied by generation projects # and generation projects that can provide reserves # note: these are also the indexing sets of the above set arrays; maybe that could be used? @@ -676,13 +587,20 @@ def load_inputs(m, switch_data, inputs_dir): """ spinning_reserve_params.dat may override the default value of contingency_safety_factor.
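Aside: a quick numeric check of the "3+5" rule implemented in NREL35VarGenSpinningReserveRequirement_rule above, using hypothetical values (not taken from the example inputs):

# NREL "3+5" heuristic: 3% of zone load plus 5% of dispatched variable
# renewable output, summed per balancing area and timepoint
def nrel_3_5_requirement(load_mw, variable_gen_mw):
    return 0.03 * load_mw + 0.05 * variable_gen_mw

print(nrel_3_5_requirement(1000.0, 200.0))  # 30.0 + 10.0 = 40.0 MW of spinning reserves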
Note that this is a .dat file, not a .tab file. """ + path = os.path.join(inputs_dir, 'generation_projects_reserve_capability.tab') switch_data.load_aug( - filename=os.path.join(inputs_dir, 'generation_projects_reserve_capability.tab'), + filename=path, + optional=True, auto_select=True, optional_params=['gen_reserve_type_max_share'], index=m.GEN_SPINNING_RESERVE_TYPES, param=(m.gen_reserve_type_max_share) ) + if not os.path.isfile(path): + gen_projects = switch_data.data()['GENERATION_PROJECTS'][None] + switch_data.data()['GEN_SPINNING_RESERVE_TYPES'] = {} + switch_data.data()['GEN_SPINNING_RESERVE_TYPES'][None] = \ + [(g, "spinning") for g in gen_projects] switch_data.load_aug( filename=os.path.join(inputs_dir, 'spinning_reserve_params.dat'), diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index 612c795cd..b5bd05e4a 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -197,7 +197,7 @@ def define_components(m): m.HydrogenSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: \ - sum(m.HydrogenSlackDown[g, t] + sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) ) m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') From df0e604ff5da7a1015956fb973e4a7f42317d3c8 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 23 Feb 2018 16:17:49 -1000 Subject: [PATCH 19/51] require users to select a reserve allocation rule or explicitly state 'none'. --- .../operating_reserves/spinning_reserves_advanced.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index e84671687..ec3bdb879 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -119,12 +119,14 @@ def define_arguments(argparser): "Unlike unit contingencies, this is a purely linear expression.") ) group.add_argument('--spinning-requirement-rule', default=None, - choices = ["Hawaii", "3+5"], + choices = ["Hawaii", "3+5", "none"], help=("Choose rules for spinning reserves requirements as a function " "of variable renewable power and load. Hawaii uses rules " "bootstrapped from the GE RPS study, and '3+5' requires 3% of " "load and 5% of variable renewable output, based on the heuristic " - "described in the 2010 Western Wind and Solar Integration Study.") + "described in the 2010 Western Wind and Solar Integration Study. " + "Specify 'none' if applying your own rules instead. " + ) ) # TODO: define these inputs in data files group.add_argument( @@ -141,7 +143,7 @@ def define_arguments(argparser): help= "Type of reserves to use to meet the regulating reserve requirements " "defined by the spinning requirements rule (e.g., 'spinning' or " - "'contingency'); default is 'spinning'." + "'regulation'); default is 'spinning'."
) @@ -556,6 +558,10 @@ def rule(m): hawaii_spinning_reserve_requirements(m) elif m.options.spinning_requirement_rule == '3+5': nrel_3_5_spinning_reserve_requirements(m) + elif m.options.spinning_requirement_rule == 'none': + pass # users can turn off the rules and use their own instead + else: + raise ValueError('No --spinning-requirement-rule specified on command line; unable to allocate reserves.') def define_dynamic_components(m): From 049b2c9f0612244cabb15b8c5fc828329551757d Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 23 Feb 2018 16:23:23 -1000 Subject: [PATCH 20/51] Hawaii package: fix bugs, improve code and represent PSIP more accurately --- .../hawaii/batteries_fixed_calendar_life.py | 16 +- switch_model/hawaii/hydrogen.py | 2 +- switch_model/hawaii/psip_2016_12.py | 286 ++++++++++++------ switch_model/hawaii/save_results.py | 12 +- switch_model/hawaii/scenario_data.py | 106 ++++--- 5 files changed, 285 insertions(+), 137 deletions(-) diff --git a/switch_model/hawaii/batteries_fixed_calendar_life.py b/switch_model/hawaii/batteries_fixed_calendar_life.py index 228e34ec4..e5ddc50a8 100644 --- a/switch_model/hawaii/batteries_fixed_calendar_life.py +++ b/switch_model/hawaii/batteries_fixed_calendar_life.py @@ -86,32 +86,34 @@ def define_components(m): m.Battery_Max_Charge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.ChargeBattery[z, t] <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + # changed 2018-02-20 to allow full discharge in min_discharge_time, + # (previously pegged to battery_max_discharge) + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time ) m.Battery_Max_Discharge_Rate = Constraint(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.DischargeBattery[z, t] <= - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time ) # how much could output/input be increased on short notice (to provide reserves) m.BatterySlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time - m.DischargeBattery[z, t] + m.ChargeBattery[z, t] ) m.BatterySlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.Battery_Capacity[z, m.tp_period[t]] * m.battery_max_discharge / m.battery_min_discharge_time + m.Battery_Capacity[z, m.tp_period[t]] / m.battery_min_discharge_time - m.ChargeBattery[z, t] + m.DischargeBattery[z, t] ) - # assume batteries can only complete one full cycle (charged to max discharge) - # per day, averaged over each period + # assume batteries can only complete one full cycle per day, averaged over each period + # (this was pegged to battery_max_discharge before 2018-02-20) m.Battery_Cycle_Limit = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum(m.DischargeBattery[z, tp] * m.tp_duration_hrs[tp] for tp in m.TPS_IN_PERIOD[p]) <= - m.Battery_Capacity[z, p] * m.battery_max_discharge * m.period_length_hours[p] + m.Battery_Capacity[z, p] * m.period_length_hours[p] ) # Register with spinning reserves if it is available diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index 612c795cd..b5bd05e4a 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -197,7 +197,7 @@ def define_components(m): 
m.HydrogenSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: \ - sum(m.HydrogenSlackDown[g, t] + sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) ) m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py index 2f0eafbaa..c7c12494b 100644 --- a/switch_model/hawaii/psip_2016_12.py +++ b/switch_model/hawaii/psip_2016_12.py @@ -1,7 +1,12 @@ from __future__ import division +from collections import defaultdict +from textwrap import dedent import os from pyomo.environ import * +def TODO(note): + raise NotImplementedError(dedent(note)) + def define_arguments(argparser): argparser.add_argument('--psip-force', action='store_true', default=True, help="Force following of PSIP plans (retiring AES and building certain technologies).") @@ -12,6 +17,11 @@ def define_arguments(argparser): argparser.add_argument('--force-build', nargs=3, default=None, help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") +def is_renewable(tech): + return any(txt in tech for txt in ("PV", "Wind", "Solar")) +def is_battery(tech): + return 'battery' in tech.lower() + def define_components(m): ################### # resource rules to match HECO's 2016-04-01 PSIP @@ -50,74 +60,82 @@ def define_components(m): # SWITCH profiles seem to be more accurate, so we optimize against them # and show that this may give (small) savings vs. the RESOLVE plan. + # TODO: Should I use Switch to investigate how much of HECO's poor performance is due + # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), + # how much is due to capping PV at 300 MW in 2020, + # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and + # how much is due to forcing in elements before and after the optimization? + + # NOTE: I briefly moved future DistPV to the existing plants workbook, with the idea that + # we assume the same forecasted adoption occurs with or without the PSIP. That approach + # also spread the DistPV adoption among the top half of tranches, rather than allowing + # Switch to cherry-pick the best tranches. However, that approach was ineffective because + # Switch was still able to add (and did add) DistPV from the lower tranches. That could + # have been fixed up in import_data.py, or the DistPV could have been moved here, into + # technology_targets_definite. However, on further reflection, forcing DistPV installations + # to always match the PSIP forecast seems artificial -- it might be better to do DistPV + # than utility-scale PV, and there's no reason to preclude that in the non-PSIP plans. + # (Although it's probably not worth dwelling long on differences if they arise, since they + # won't make a huge difference in cost.) So now the DistPV is treated as just another optional + # part of the PSIP plan. Note that this allows Switch to cherry-pick among the best DistPV + # tranches to meet the PSIP, but that is a little conservative (favorable to HECO), because + # Switch can also do that for the non-PSIP scenarios. Also, these targets are roughly equal + # to the top half of the DistPV tranches, so there's not much cherry-picking going on anyway. 
+ # This could be resolved by setting (optional) project-specific targets in this module, + # or by making the DistPV tranches coarser (e.g., upper half, third quartile, fourth quartile), + # which seems like a good idea for representing the general precision of DistPV policies + # anyway. + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + # and specific projects in the PSIP can be represented accurately (really just NPM wind). This + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # same technology when it replaces retired renewable projects. + # targets for individual generation technologies # (year, technology, MW added) # TODO: allow either CentralFixedPV or CentralTrackingPV for utility-scale solar # (not urgent now, since CentralFixedPV is not currently in the model) - def annual(start_year, end_year, start_amount, end_amount): - # should be applied to each year including end year, but not start year - return (end_amount-start_amount)/(end_year-start_year) - - # technologies that are definitely being built (we assume near-term - # are underway and military projects are being built for their own - # reasons) - technology_targets_definite = [ - (2016, 'CentralTrackingPV', 27.6), # Waianae Solar by Eurus Energy America - (2018, 'IC_Schofield', 54.0), - - # Distributed PV from Table J-1 of PSIP - # TODO: check that this matches Resolve inputs - # This is treated as definite, so we don't get caught up in "you could save - # a little money by building Central PV instead of distributed." Probably - # appropriate, since it's a forecast, not a decision anyway. - (2016, 'DistPV', 471 - 444), # net of 444 MW of pre-existing DistPV, also counted in 2016 - - (2017, 'DistPV', annual(2016, 2020, 471, 856)), - (2018, 'DistPV', annual(2016, 2020, 471, 856)), - (2019, 'DistPV', annual(2016, 2020, 471, 856)), - (2020, 'DistPV', annual(2016, 2020, 471, 856)), + # Technologies that are definitely being built (at least have permits already.) + # (Note: these have all been moved into the existing plants workbook.) 
+ technology_targets_definite = [] - (2021, 'DistPV', annual(2020, 2030, 856, 1169)), - (2022, 'DistPV', annual(2020, 2030, 856, 1169)), - (2023, 'DistPV', annual(2020, 2030, 856, 1169)), - (2024, 'DistPV', annual(2020, 2030, 856, 1169)), - (2025, 'DistPV', annual(2020, 2030, 856, 1169)), - (2026, 'DistPV', annual(2020, 2030, 856, 1169)), - (2027, 'DistPV', annual(2020, 2030, 856, 1169)), - (2028, 'DistPV', annual(2020, 2030, 856, 1169)), - (2029, 'DistPV', annual(2020, 2030, 856, 1169)), - (2030, 'DistPV', annual(2020, 2030, 856, 1169)), - - (2031, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2032, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2033, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2034, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2035, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2036, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2037, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2038, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2039, 'DistPV', annual(2030, 2040, 1169, 1517)), - (2040, 'DistPV', annual(2030, 2040, 1169, 1517)), - - (2041, 'DistPV', annual(2040, 2045, 1517, 1697)), - (2042, 'DistPV', annual(2040, 2045, 1517, 1697)), - (2043, 'DistPV', annual(2040, 2045, 1517, 1697)), - (2044, 'DistPV', annual(2040, 2045, 1517, 1697)), - (2045, 'DistPV', annual(2040, 2045, 1517, 1697)), - # replace prebuilt capacity (counted in 2016, so retired in 2041) - (2041, 'DistPV', 444), - # replace PSIP capacity built before 2020, which was counted in 2020 (retires in 2045) - (2045, 'DistPV', 856-444), - ] - # technologies proposed in PSIP but which may not be built if a - # better plan is found + # add targets specified on the command line + if m.options.force_build is not None: + b = list(m.options.force_build) + b[0] = int(b[0]) # year + b[2] = float(b[2]) # quantity + b = tuple(b) + print "Forcing build: {}".format(b) + technology_targets_definite.append(b) + + # technologies proposed in PSIP but which may not be built if a better plan is found. + # All from final plan in Table 4-1 of PSIP 2016-12-23 sometimes cross-referenced with PLEXOS inputs. + # These differ somewhat from inputs to RESOLVE or the RESOLVE plans in Table 3-1 and 3-4, but + # they represent HECO's final plan as reported in the PSIP. technology_targets_psip = [ - (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind - (2018, 'CentralTrackingPV', 109.6), # replacement for canceled SunEdison projects - (2018, 'OnshoreWind', 10), # CBRE wind + # Na Pua Makani (NPM) wind (still awaiting approval as of Feb. 2018) note: this is at a + # specific location (21.668 -157.956), but since it isn't in the existing plants + # workbook, we represent it as a generic technology target. + # note: Resolve modeled 134 MW of planned onshore wind, 30 MW of optional onshore + # and 800 MW of optional offshore; See "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/capacity_limits.tab". + # planned seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69); + # Resolve built 273 MW offshore in 2025-45 (including 143 MW rebuilt in 2045), + # and 30 MW onshore in 2045 (tables 3-1 and 3-4). + # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But + # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 (presumably rebuilt + # in 2045) and 30 MW onshore in 2045. 
+ (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind + (2018, 'OnshoreWind', 10), # CBRE wind + # note: 109.6 MW SunEdison replacements are in Existing Plants workbook. + + # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) + # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), + # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). + # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 + # (we list those as "existing") and other additions shown below. (2018, 'CentralTrackingPV', 15), # CBRE PV - (2019, 'CentralTrackingPV', 20), # West Loch PV (2020, 'CentralTrackingPV', 180), (2022, 'CentralTrackingPV', 40), (2022, 'IC_Barge', 100.0), # JBPHH plant @@ -128,22 +146,85 @@ def annual(start_year, end_year, start_amount, end_amount): (2040, 'CentralTrackingPV', 280), (2045, 'CentralTrackingPV', 1180), (2045, 'IC_MCBH', 68.0), # proxy for 68 MW of generic ICE capacity - # restrict construction of batteries - (2022, 'LoadShiftBattery', 426), - (2025, 'LoadShiftBattery', 29), - (2030, 'LoadShiftBattery', 165), - (2035, 'LoadShiftBattery', 168), - (2040, 'LoadShiftBattery', 420), - (2045, 'LoadShiftBattery', 1525), - ] - if m.options.force_build is not None: - b = list(m.options.force_build) - b[0] = int(b[0]) # year - b[2] = float(b[2]) # quantity - b = tuple(b) - print "Forcing build: {}".format(b) - technology_targets_definite.append(b) + # batteries (MW) + # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in + # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" + # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) + (2019, 'Battery_Conting', 90), + (2022, 'Battery_4', 426), + (2025, 'Battery_4', 29), + (2030, 'Battery_4', 165), + (2035, 'Battery_4', 168), + (2040, 'Battery_4', 420), + (2045, 'Battery_4', 1525), + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). + # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). + # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not + # from EVs or flexible demand. + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # (no separate sign of EVs). + # TODO: check Resolve load levels against Switch. + # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS + # (for all islands). + # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? + # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. + + # installations based on changes in installed capacity shown in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab + # Also see Figure J-10 of 2016-12-23 PSIP (Vol. 
3), which matches these levels (excluding FIT(?)). + # Note: code further below adds in reconstruction of early installations + (2020, "DistPV", 606.3-444), # net of 444 installed as of 2016 (in existing generators workbook) + (2022, "DistPV", 680.3-606.3), + (2025, "DistPV", 744.9-680.3), + (2030, "DistPV", 868.7-744.9), + (2035, "DistPV", 1015.4-868.7), + (2040, "DistPV", 1163.4-1015.4), + (2045, "DistPV", 1307.9-1163.4), + ] + + # Rebuild renewable projects at retirement (20 years), as specified in the PSIP + # note: this doesn't include DistPV, because those are part of a forecast, not a plan, so they already + # get reconstructed in the existing generators workbook, whether or not the PSIP plan is used. + + # note: this behavior is consistent with the following: + # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. + # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 + # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. + # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv + # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch + # (Waivers PV1, West Loch; Kawailoa may be omitted?) + # also note: Plexos input files in XX + # show max battery capacity equal to sum of all prior additions + + # projects from existing plants workbook (pasted in) + existing_techs = [ + (2011, "OnshoreWind", 30), + (2012, "OnshoreWind", 69), + (2012, "CentralTrackingPV", 5), + (2016, "CentralTrackingPV", 27.6), + (2016, "DistPV", 444), + (2018, "IC_Schofield", 54.98316), + (2018, "CentralTrackingPV", 49), + (2018, "CentralTrackingPV", 14.7), + (2018, "CentralTrackingPV", 46), + (2018, "CentralTrackingPV", 20), + ] + existing_techs += technology_targets_definite + existing_techs += technology_targets_psip + # rebuild all renewables at retirement (20 years for RE, 15 years for batteries) + rebuild_targets = [ + (y+20, tech, cap) for y, tech, cap in existing_techs if is_renewable(tech) + ] + [ + (y+15, tech, cap) for y, tech, cap in existing_techs if is_battery(tech) + ] # note: early batteries won't quite need 2 replacements + # don't schedule rebuilding past end of study + rebuild_targets = [t for t in rebuild_targets if t[0] <= 2045] + technology_targets_psip += rebuild_targets # make sure LNG is turned off if psip and getattr(m.options, "force_lng_tier", []) != ["none"]: @@ -157,8 +238,15 @@ def annual(start_year, end_year, start_amount, end_amount): # make a special list including all standard generation technologies plus "LoadShiftBattery" m.GEN_TECHS_AND_BATTERIES = Set(initialize=lambda m: [g for g in m.GENERATION_TECHNOLOGIES] + ["LoadShiftBattery"]) + # make a list of renewable technologies + m.RENEWABLE_TECHNOLOGIES = Set( + initialize=m.GENERATION_TECHNOLOGIES, + filter=lambda m, tech: is_renewable(tech) + ) + def technology_target_init(m, per, tech): - """Find the amount of each technology that is targeted to be built by the start of each period.""" + """Find the amount of each technology that is targeted to be built between the start of the + previous period and the start of the current period.""" start = 2000 if per == m.PERIODS.first() else 
m.PERIODS.prev(per) end = per target = sum( @@ -168,34 +256,48 @@ def technology_target_init(m, per, tech): return target m.technology_target = Param(m.PERIODS, m.GEN_TECHS_AND_BATTERIES, initialize=technology_target_init) + def MakeGenTechDicts_rule(m): + # get unit sizes of all technologies + unit_sizes = m.gen_tech_unit_size_dict = defaultdict(float) + for g, unit_size in m.gen_unit_size.iteritems(): + tech = m.gen_tech[g] + if tech in unit_sizes: + if unit_sizes[tech] != unit_size: + raise ValueError("Generation technology {} uses different unit sizes for different projects.".format(tech)) + else: + unit_sizes[tech] = unit_size + # get predetermined capacity for all technologies + predet_cap = m.gen_tech_predetermined_cap_dict = defaultdict(float) + for (g, per), cap in m.gen_predetermined_cap.iteritems(): + tech = m.gen_tech[g] + predet_cap[tech, per] += cap + m.MakeGenTechDicts = BuildAction(rule=MakeGenTechDicts_rule) + # with PSIP: BuildGen is zero except for technology_targets # (sum during each period or before first period) # without PSIP: BuildGen is >= definite targets def Enforce_Technology_Target_rule(m, per, tech): """Enforce targets for each technology; exact target for PSIP cases, minimum target for non-PSIP.""" - - def adjust_psip_credit(g, target): - if g in m.DISCRETELY_SIZED_GENS and target > 0.0: - # Rescale so that the n integral units that come closest - # to the target gets counted as the n.n fractional units - # needed to exactly meet the target. - # This is needed because some of the targets are based on - # nominal unit sizes rather than actual max output. - return (target / m.gen_unit_size[g]) / round(target / m.gen_unit_size[g]) - else: - return 1.0 - target = m.technology_target[per, tech] - + # get target, including any capacity specified in the predetermined builds, + # so the target will be additional to those + target = m.technology_target[per, tech] + m.gen_tech_predetermined_cap_dict[tech, per] + + # convert target to closest integral number of units + # (some of the targets are based on nominal unit sizes rather than actual max output) + if m.gen_tech_unit_size_dict[tech] > 0.0: + target = round(target / m.gen_tech_unit_size_dict[tech]) * m.gen_tech_unit_size_dict[tech] + if tech == "LoadShiftBattery": # special treatment for batteries, which are not a standard technology if hasattr(m, 'BuildBattery'): - build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) + # note: BuildBattery is in MWh, so we convert to MW + build = sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES) / m.battery_min_discharge_time else: build = 0 else: build = sum( - m.BuildGen[g, per] * adjust_psip_credit(g, target) + m.BuildGen[g, per] for g in m.GENERATION_PROJECTS if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS ) @@ -212,7 +314,7 @@ def adjust_psip_credit(g, target): return Constraint.Infeasible elif psip: return (build == target) - elif m.options.psip_minimal_renewables and any(txt in tech for txt in ["PV", "Wind", "Solar"]): + elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES: # only build the specified amount of renewables, no more return (build == target) else: diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 6f598298a..7cf1e7bab 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -175,18 +175,24 @@ def write_results(m, outputs_dir): +tuple( sum( DispatchGenByFuel(m, p, t, f) - for p in m.GENERATION_PROJECTS_BY_FUEL[f] if (p, t) in m.GEN_TPS + for p in
m.GENERATION_PROJECTS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z ) for f in m.FUELS ) +tuple( - sum(util.get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]) + sum( + util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z + ) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple( sum( util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[p] == z ) for s in m.NON_FUEL_ENERGY_SOURCES ) @@ -225,7 +231,7 @@ def gen_energy_source(g): built_energy_source = tuple(sorted(set(gen_energy_source(g) for g in built_gens))) battery_capacity_mw = lambda m, z, pe: ( - (m.Battery_Capacity[z, pe] * m.battery_max_discharge / m.battery_min_discharge_time) + (m.Battery_Capacity[z, pe] / m.battery_min_discharge_time) if hasattr(m, "Battery_Capacity") else 0.0 ) diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index b31aed4c3..65f4c8349 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -96,11 +96,14 @@ def write_tables(**args): # (switch_model.timescales can handle that now), # and it lets period_end be a floating point number # (postgresql will export it with a .0 in this case) + # note: despite the comments above, this rounded period_end to + # the nearest whole number until 2018-02-17. This was removed to + # support fractional years for monthly batches in production-cost models. write_table('periods.tab', with_period_length + """ SELECT p.period AS "INVESTMENT_PERIOD", p.period as period_start, - round(p.period + period_length) as period_end + p.period + period_length as period_end FROM study_periods p JOIN period_length l USING (period) WHERE time_sample = %(time_sample)s ORDER by 1; @@ -124,6 +127,19 @@ def write_tables(**args): ORDER BY period, extract(doy from date), study_hour; """, args) + # double-check that arguments are valid + cur = db_cursor() + cur.execute( + 'select * from generator_costs_by_year where cap_cost_scen_id = %(cap_cost_scen_id)s', + args + ) + if len([r for r in cur]) == 0: + print "================================================================" + print "WARNING: no records found in generator_costs_by_year for cap_cost_scen_id='{}'".format(args['cap_cost_scen_id']) + print "================================================================" + time.sleep(2) + del cur + ######################### # create temporary tables that can be referenced by other queries # to identify available projects and technologies @@ -131,7 +147,7 @@ def write_tables(**args): DROP TABLE IF EXISTS study_length; CREATE TEMPORARY TABLE study_length AS {} - SELECT min(period) as study_start, max(period+period_length) AS study_end + SELECT min(period)::real as study_start, max(period+period_length)::real AS study_end FROM period_length; DROP TABLE IF EXISTS study_projects; @@ -167,7 +183,6 @@ def write_tables(**args): FROM generator_info g JOIN study_projects p USING (technology); """.format(with_period_length), args) - ######################### # financials @@ -407,6 +422,11 @@ def write_tables(**args): # not $/kW, $/kWh or $/kW-year. # NOTE: for now, we only specify storage costs per unit of power, not # on per unit of energy, so we insert $0 as the energy cost here. 
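Aside: the write_table calls in this file pair a parameterized SQL query with a tab-delimited output file. A rough sketch of that pattern (hypothetical helper under assumed conventions; not the actual implementation in scenario_data.py):

import csv

def write_table_sketch(cursor, path, query, args):
    # run the query and dump the results as tab-delimited text, writing '.'
    # for NULLs (the usual placeholder convention in Switch .tab inputs)
    cursor.execute(query, args)
    with open(path, 'w') as f:
        w = csv.writer(f, delimiter='\t', lineterminator='\n')
        w.writerow([col[0] for col in cursor.description])
        for row in cursor:
            w.writerow(['.' if v is None else v for v in row])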
+ # NOTE: projects should have NULL for overnight cost and fixed O&M in + # proj_existing_builds if they have an entry for the same year in + # generator_costs_by_year. If they have costs in both, they will both + # get passed through to the data table, and Switch will raise an error + # (as it should, because costs are ambiguous in this case). write_table('gen_build_costs.tab', """ WITH gen_build_costs AS ( SELECT @@ -418,32 +438,46 @@ def write_tables(**args): CASE WHEN i.gen_storage_efficiency IS NULL THEN NULL ELSE 0.0 END AS gen_storage_energy_overnight_cost, i.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) - AS gen_fixed_o_m + AS gen_fixed_o_m, + i.min_vintage_year FROM study_generator_info i JOIN generator_costs_by_year c USING (technology) - JOIN study_periods p ON p.period = c.year - WHERE time_sample = %(time_sample)s - AND (i.min_vintage_year IS NULL OR c.year >= i.min_vintage_year) - AND c.cap_cost_scen_id = %(cap_cost_scen_id)s + WHERE c.cap_cost_scen_id = %(cap_cost_scen_id)s ORDER BY 1, 2 ) - SELECT + SELECT -- costs specified in proj_existing_builds "GENERATION_PROJECT", - build_year, - sum(proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + b.build_year, + SUM(b.proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_overnight_cost, null AS gen_storage_energy_overnight_cost, - sum(proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + SUM(b.proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_fixed_om - FROM study_projects JOIN proj_existing_builds USING (project_id) + FROM study_projects p + JOIN proj_existing_builds b USING (project_id) + WHERE (b.proj_overnight_cost IS NOT NULL OR b.proj_fixed_om IS NOT NULL) GROUP BY 1, 2 UNION - SELECT "GENERATION_PROJECT", build_year, gen_overnight_cost, + SELECT -- costs specified in generator_costs_by_year + "GENERATION_PROJECT", c.build_year, gen_overnight_cost, gen_storage_energy_overnight_cost, gen_fixed_o_m - FROM gen_build_costs JOIN study_projects USING (technology) + FROM study_projects proj + JOIN gen_build_costs c USING (technology) + LEFT JOIN study_periods per ON (per.time_sample = %(time_sample)s AND c.build_year = per.period) + LEFT JOIN proj_existing_builds e ON (e.project_id = proj.project_id AND e.build_year = c.build_year) + WHERE + -- note: this allows users to have build_year < min_vintage_year for predetermined projects + -- that have entries in the cost table, e.g., if they want to prespecify some, but postpone + -- additional construction until some later year (unlikely) + (per.period IS NOT NULL AND (c.min_vintage_year IS NULL OR c.build_year >= c.min_vintage_year)) + OR e.project_id IS NOT NULL ORDER BY 1, 2; """, args) + if args['base_financial_year'] != 2016: + print "WARNING: capital costs for existing plants were stored in the database with a 2016 base year" + print "WARNING: and have not been updated to the scenario base year of {}.".format(args['base_financial_year']) + ######################### # spinning_reserves_advanced # args['max_reserve_capability'] is a list of tuples of (technology, reserve_type) @@ -458,11 +492,12 @@ def write_tables(**args): res_args['reserve_technologies']=reserve_technologies res_args['reserve_types']=reserve_types + # note: casting is needed if the lists are empty; see https://stackoverflow.com/a/41893576/3830997 write_table('generation_projects_reserve_capability.tab', """ WITH reserve_capability (technology, reserve_type) as ( - SELECT - 
UNNEST(%(reserve_technologies)s) AS technology, - UNNEST(%(reserve_types)s) AS reserve_type + SELECT + UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, + UNNEST(%(reserve_types)s::varchar(20)[]) AS reserve_type ), reserve_types (rank, reserve_type) as ( VALUES @@ -649,22 +684,25 @@ def write_tables(**args): ######################### # batteries - # (now included as standard storage projects) - # bat_years = 'BATTERY_CAPITAL_COST_YEARS' - # bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' - # write_dat_file( - # 'batteries.dat', - # sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]), - # args - # ) - # if bat_years in args and bat_cost in args: - # # annual costs were provided -- write those to a tab file - # write_tab_file( - # 'battery_capital_cost.tab', - # headers=[bat_years, bat_cost], - # data=zip(args[bat_years], args[bat_cost]), - # arguments=args - # ) + # (now included as standard storage projects, but kept here + # to support older projects that haven't upgraded yet) + bat_years = 'BATTERY_CAPITAL_COST_YEARS' + bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' + non_cost_bat_vars = sorted([k for k in args if k.startswith('battery_') and k not in [bat_years, bat_cost]]) + if non_cost_bat_vars: + write_dat_file( + 'batteries.dat', + non_cost_bat_vars, + args + ) + if bat_years in args and bat_cost in args: + # annual costs were provided -- write those to a tab file + write_tab_file( + 'battery_capital_cost.tab', + headers=[bat_years, bat_cost], + data=zip(args[bat_years], args[bat_cost]), + arguments=args + ) ######################### # EV annual energy consumption From a461226348dc9275d748b1609a5855e11ceea418 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 2 Mar 2018 07:06:55 -1000 Subject: [PATCH 21/51] merge transmission_lines.tab and trans_optional_params.tab; allow island scenarios --- switch_model/transmission/transport/build.py | 43 +++++++++++++------- switch_model/upgrade/upgrade_2_0_0b4.py | 10 +++++ 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/switch_model/transmission/transport/build.py b/switch_model/transmission/transport/build.py index 88ed5d14f..878a0aebd 100644 --- a/switch_model/transmission/transport/build.py +++ b/switch_model/transmission/transport/build.py @@ -174,7 +174,11 @@ def define_components(mod): mod.TRANSMISSION_LINES = Set() mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES) mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES) - mod.min_data_check('TRANSMISSION_LINES', 'trans_lz1', 'trans_lz2') + # we don't do a min_data_check for TRANSMISSION_LINES, because it may be empty for model + # configurations that are sometimes run with interzonal transmission and sometimes not + # (e.g., island interconnect scenarios). However, presence of this column will still be + # checked by load_data_aug. + mod.min_data_check('trans_lz1', 'trans_lz2') mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx) mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=NonNegativeReals) mod.trans_efficiency = Param( @@ -187,9 +191,10 @@ def define_components(mod): mod.existing_trans_cap = Param( mod.TRANSMISSION_LINES, within=NonNegativeReals) + # Note: we don't do a min_data_check for BLD_YRS_FOR_EXISTING_TX, because it may be empty for + # models that start with no pre-existing transmission (e.g., island interconnect scenarios). 
mod.min_data_check( - 'trans_length_km', 'trans_efficiency', 'BLD_YRS_FOR_EXISTING_TX', - 'existing_trans_cap') + 'trans_length_km', 'trans_efficiency', 'existing_trans_cap') mod.trans_new_build_allowed = Param( mod.TRANSMISSION_LINES, within=Boolean, default=True) mod.NEW_TRANS_BLD_YRS = Set( @@ -324,20 +329,28 @@ def load_inputs(mod, switch_data, inputs_dir): """ + # TODO: send issue / pull request to Pyomo to allow .tab files with + # no rows after header (fix bugs in pyomo.core.plugins.data.text) switch_data.load_aug( filename=os.path.join(inputs_dir, 'transmission_lines.tab'), - select=('TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2', - 'trans_length_km', 'trans_efficiency', 'existing_trans_cap'), - index=mod.TRANSMISSION_LINES, - param=(mod.trans_lz1, mod.trans_lz2, mod.trans_length_km, - mod.trans_efficiency, mod.existing_trans_cap)) - switch_data.load_aug( - filename=os.path.join(inputs_dir, 'trans_optional_params.tab'), - optional=True, - select=('TRANSMISSION_LINE', 'trans_dbid', 'trans_derating_factor', - 'trans_terrain_multiplier', 'trans_new_build_allowed'), - param=(mod.trans_dbid, mod.trans_derating_factor, - mod.trans_terrain_multiplier, mod.trans_new_build_allowed)) + select=( + 'TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2', + 'trans_length_km', 'trans_efficiency', 'existing_trans_cap', + 'trans_dbid', 'trans_derating_factor', + 'trans_terrain_multiplier', 'trans_new_build_allowed' + ), + index=mod.TRANSMISSION_LINES, + optional_params=( + 'trans_dbid', 'trans_derating_factor', + 'trans_terrain_multiplier', 'trans_new_build_allowed' + ), + param=( + mod.trans_lz1, mod.trans_lz2, + mod.trans_length_km, mod.trans_efficiency, mod.existing_trans_cap, + mod.trans_dbid, mod.trans_derating_factor, + mod.trans_terrain_multiplier, mod.trans_new_build_allowed + ) + ) trans_params_path = os.path.join(inputs_dir, 'trans_params.dat') if os.path.isfile(trans_params_path): switch_data.load(filename=trans_params_path) diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py index 7f44677be..22b83d7de 100644 --- a/switch_model/upgrade/upgrade_2_0_0b4.py +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -42,5 +42,15 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): for old, new in old_new_pairs: rename_column(fname, old_col_name=old, new_col_name=new) + # merge trans_optional_params.tab with transmission_lines.tab + trans_lines_path = os.path.join(inputs_dir, 'transmission_lines.tab') + trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab') + if os.path.isfile(trans_lines_path) and os.path.isfile(trans_lines_path): + trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep='\t') + trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep='\t') + trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') + trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False) + os.remove(trans_opt_path) + # Write a new version text file. 
switch_model.upgrade._write_input_version(inputs_dir, upgrades_to) From 16e3f84626f0dda28eabff6968ee7fe0b02d0e3a Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 4 Mar 2018 12:25:49 -1000 Subject: [PATCH 22/51] save hourly dispatch --- switch_model/hawaii/save_results.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 7cf1e7bab..95eb38b90 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -203,6 +203,16 @@ def write_results(m, outputs_dir): 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') ) + sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) + util.write_table( + m, m.TIMEPOINTS, + output_file=os.path.join(outputs_dir, "gen_dispatch{t}.tsv".format(t=tag)), + headings=("period", "timepoint_label")+sorted_projects, + values=lambda m, t: + (m.tp_period[t], m.tp_timestamp[t]) + + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects) + ) + # installed capacity information def gen_energy_source(g): return ( From 0ebd5b628deae0c1a07e94e0ed7680884def0eea Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 8 Mar 2018 03:41:45 -0500 Subject: [PATCH 23/51] improvements to Hawaii package --- .../hawaii/register_hi_storage_reserves.py | 123 ++++++++++++++++++ switch_model/hawaii/rps.py | 43 ++++-- switch_model/hawaii/smooth_dispatch.py | 13 +- switch_model/hawaii/switch_patch.py | 55 ++++++++ 4 files changed, 219 insertions(+), 15 deletions(-) create mode 100644 switch_model/hawaii/register_hi_storage_reserves.py diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py new file mode 100644 index 000000000..f5f6876a9 --- /dev/null +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -0,0 +1,123 @@ +""" +Defines types of reserve target and components that contribute to reserves, +and enforces the reserve targets. +""" +import os +from pyomo.environ import * + +# TODO: use standard reserves module for this +# note: this is modeled off of hawaii.reserves, to avoid adding lots of +# reserve-related code to the pumped storage and hydrogen modules. +# But eventually those modules should use the standard storage module and +# extend that as needed. + +def define_arguments(argparser): + argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from hydrogen and/or pumped-hydro storage " + "(e.g., 'contingency' or 'regulation'). " + "Default is generic 'spinning'. Specify 'none' to disable." 
+ ) + +def define_components(m): + + if [rt.lower() for rt in m.options.hawaii_storage_reserve_types] != ['none']: + if hasattr(m, 'PumpedHydroProjGenerateMW'): + m.PumpedStorageCharging = Var(m.PH_GENS, m.TIMEPOINTS, within=Binary) + m.Set_PumpedStorageCharging_Flag = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: + m.PumpedHydroProjGenerateMW[phg, tp] + <= + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) + ) + # choose how much pumped storage reserves to provide each hour, without reversing direction + m.PumpedStorageSpinningUpReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) + m.Limit_PumpedStorageSpinningUpReserves_When_Charging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningUpReserves[phg, tp] + <= + m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + ) + m.Limit_PumpedStorageSpinningUpReserves_When_Discharging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningUpReserves[phg, tp] + <= + m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + ) + # TODO: implement down reserves + m.PumpedStorageSpinningDownReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0,0)) + + # Register with spinning reserves + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # using spinning_reserves_advanced + # calculate available slack from hawaii storage + def up_expr(m, a, tp): + avail = 0.0 + if hasattr(m, 'HydrogenSlackUp'): + avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + if hasattr(m, 'PumpedStorageSpinningUpReserves'): + avail += sum( + m.PumpedStorageSpinningUpReserves[phg, tp] + for phg in m.PH_GENS + if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] + ) + return avail + m.HawaiiStorageSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=up_expr) + def down_expr(m, a, tp): + avail = 0.0 + if hasattr(m, 'HydrogenSlackDown'): + avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + if hasattr(m, 'PumpedStorageSpinningDownReserves'): + avail += sum( + m.PumpedStorageSpinningDownReserves[phg, tp] + for phg in m.PH_GENS + if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] + ) + return avail + m.HawaiiStorageSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=down_expr) + + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.HI_STORAGE_SPINNING_RESERVE_TYPES = Set( + initialize=m.options.hawaii_storage_reserve_types + ) + m.HawaiiStorageSpinningReserveUp = Var( + m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + m.HawaiiStorageSpinningReserveDown = Var( + m.HI_STORAGE_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + # constrain reserve provision within available slack + m.Limit_HawaiiStorageSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HawaiiStorageSpinningReserveUp[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) <= m.HawaiiStorageSlackUp[ba, tp] + ) + m.Limit_HawaiiStorageSpinningReserveDown = Constraint( + 
m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HawaiiStorageSpinningReserveDown[rt, ba, tp] + for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES + ) <= m.HawaiiStorageSlackDown[ba, tp] + ) + m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSpinningReserveUp') + m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSpinningReserveDown') + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.hawaii_storage_reserve_types != ['spinning']: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' + ) + m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSlackUp') + m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSlackDown') + diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 9e0d87d9f..19a8eb207 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -15,9 +15,13 @@ def define_arguments(argparser): argparser.add_argument('--rps-deactivate', dest='rps_level', action='store_const', const='deactivate', help="Deactivate RPS.") - argparser.add_argument('--rps-no-renewables', - dest='rps_level', action='store_const', const='no_renewables', - help="Deactivate RPS and don't allow any new renewables.") + argparser.add_argument('--rps-no-new-renewables', + dest='rps_level', action='store_const', const='no_new_renewables', + help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.") + argparser.add_argument('--rps-no-new-wind', action='store_true', default=False, + help="Don't allow any new wind capacity except to replace existing capacity.") + argparser.add_argument('--rps-no-wind', action='store_true', default=False, + help="Don't allow any new wind capacity or replacement of existing capacity.") argparser.add_argument( '--rps-allocation', default=None, choices=[ @@ -109,14 +113,31 @@ def rps_target_for_period_rule(m, p): m.RPS_Enforce = Constraint(m.PERIODS, rule=lambda m, per: m.RPSEligiblePower[per] >= m.rps_target_for_period[per] * m.RPSTotalPower[per] ) - elif m.options.rps_level == 'no_renewables': - # prevent construction of any new exclusively-renewable projects - # (doesn't actually ban use of biofuels in existing or multi-fuel projects, - # but that could be done with --biofuel-limit 0) - m.No_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.BuildGen[g, bld_yr] == 0) - if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES else - Constraint.Skip + elif m.options.rps_level == 'no_new_renewables': + # prevent construction of any new exclusively-renewable projects, but allow + # replacement of existing ones + # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could + # be done with --biofuel-limit 0) + m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES + else Constraint.Skip + ) + + wind_energy_sources = {'WND'} + if m.options.rps_no_new_wind: + # limit wind to existing capacity + m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + if m.gen_energy_source[g] in wind_energy_sources + else Constraint.Skip + ) + if m.options.rps_no_wind: + # don't build any new capacity or replace existing + m.No_Wind = 
Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: + (m.BuildGen[g, bld_yr] == 0.0) + if m.gen_energy_source[g] in wind_energy_sources + else Constraint.Skip ) # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index b45438d44..566276507 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -5,7 +5,7 @@ import switch_model.solve def define_components(m): - if m.options.solver in ('cplex', 'cplexamp', 'gurobi'): + if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): m.options.smooth_dispatch = True else: # glpk and cbc can't handle quadratic problem used for smoothing @@ -13,8 +13,8 @@ def define_components(m): if m.options.verbose: print "Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver) print "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." - - # add an alternative objective function that smoothes out various non-cost variables + + # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) @@ -25,7 +25,7 @@ def Smooth_Free_Variables_obj_rule(m): for component in m.Zone_Power_Injections) # minimize the variability of various slack responses adjustable_components = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', + 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW' ] for var in adjustable_components: @@ -34,6 +34,11 @@ def Smooth_Free_Variables_obj_rule(m): print "Will smooth {}.".format(var) comp = getattr(m, var) obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) + # include standard storage generators too + if hasattr(m, 'STORAGE_GEN_TPS'): + print "Will smooth charging and discharging of standard storage." 
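+            # note: summing the squares of the hourly values penalizes spiky
+            # schedules, so the cheapest solution spreads charging and
+            # discharging evenly across timepoints; this quadratic term is
+            # also why a QP-capable solver is required above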
+ obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) + obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) return obj m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) # leave standard objective in effect for now diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index b04c516ed..783e2bb94 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -14,11 +14,66 @@ def new_create_command_line(*args, **kwargs): # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 ) + print "changed CPLEX solve script to the following:" + print command.script return command from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL old_create_command_line = CPLEXSHELL.create_command_line CPLEXSHELL.create_command_line = new_create_command_line +# # TODO: combine the following changes into a pull request for Pyomo +# # patch Pyomo's table-reading function to allow .tab files with headers but no data +# import os, re +# def new_tab_read(self): +# if not os.path.exists(self.filename): +# raise IOError("Cannot find file '%s'" % self.filename) +# self.FILE = open(self.filename, 'r') +# try: +# tmp=[] +# for line in self.FILE: +# line=line.strip() +# tokens = re.split("[\t ]+",line) +# if tokens != ['']: +# tmp.append(tokens) +# if len(tmp) == 0: +# raise IOError("Empty *.tab file") +# else: # removed strange special handling for one-row files +# self._set_data(tmp[0], tmp[1:]) +# except: +# raise +# finally: +# self.FILE.close() +# self.FILE = None +# from pyomo.core.plugins.data.text import TextTable +# TextTable.read = new_tab_read +# +# try: +# import inspect +# import pyomo.core.data.process_data +# pp_code = inspect.getsource(pyomo.core.data.process_data._process_param) +# start = pp_code.find('if singledef:', 0, 2000) +# if start < 0: +# raise RuntimeError('unable to find singledef statement') +# # patch to allow command to have no more arguments at this point (i.e., no data) +# srch, repl = 'if cmd[0] == "(tr)":', 'if cmd and cmd[0] == "(tr)":' +# start = pp_code.find(srch, start, start + 500) +# if start < 0: +# raise RuntimeError('unable to find (tr) statement') +# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):] +# # patch next line for the same reason +# srch, repl = 'if cmd[0] != ":":', 'if not cmd or cmd[0] != ":":' +# start = pp_code.find(srch, start, start + 500) +# if start < 0: +# raise RuntimeError('unable to find ":" statement') +# pp_code = pp_code[:start] + repl + pp_code[start+len(srch):] +# # compile code to a function in the process_data module +# exec(pp_code, vars(pyomo.core.data.process_data)) +# except Exception as e: +# print "Unable to patch current version of pyomo.core.data.process_data:" +# print '{}({})'.format(type(e).__name__, ','.join(repr(a) for a in e.args)) +# print "Switch will not be able to read empty data files." + + def define_components(m): """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" From 5b2563871e86285d62822521d9a3754b0f52b1de Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 17:07:39 -1000 Subject: [PATCH 24/51] add a fixed-size contingency option (maybe all the reserve settings should be in a data file instead?) 
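
Example (illustrative value): adding "--fixed-contingency 180" to the solve
options enforces a 180 MW up-reserve contingency in every balancing area and
timepoint, either alone or in combination with the unit and project
contingency options.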
--- .../spinning_reserves_advanced.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index 4389edad5..c7aefe353 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -35,6 +35,11 @@ def define_arguments(argparser): "committed capacity of a generation project falling offline. " "Unlike unit contingencies, this is a purely linear expression.") ) + group.add_argument('--fixed-contingency', type=float, default=0.0, + help=("Add a fixed generator contingency reserve margin, specified in MW. " + "This can be used alone or in combination with the other " + "contingency options.") + ) group.add_argument('--spinning-requirement-rule', default=None, choices = ["Hawaii", "3+5", "none"], help=("Choose rules for spinning reserves requirements as a function " @@ -108,6 +113,18 @@ def define_dynamic_lists(m): m.Spinning_Reserve_Down_Contingencies = [] +def gen_fixed_contingency(m): + """ + Add a fixed contingency reserve margin (much faster than unit-by-unit + reserve margins, and reasonable when there is a single largest plant + that is usually online and/or reserves are cheap). + """ + m.GenFixedContingency = Param( + m.BALANCING_AREA_TIMEPOINTS, + initialize=lambda m: m.options.fixed_contingency + ) + m.Spinning_Reserve_Up_Contingencies.append('GenFixedContingency') + def gen_unit_contingency(m): """ Add components for unit-level contingencies. A generation project can @@ -461,6 +478,8 @@ def rule(m): m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') # define reserve requirements + if m.options.fixed_contingency: + gen_fixed_contingency(m) if m.options.unit_contingency: gen_unit_contingency(m) if m.options.project_contingency: From 247540bdf89fcc1d7ed463fff797315f763f484f Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 21:25:28 -1000 Subject: [PATCH 25/51] minor tweaks and bug fixes --- .../energy_sources/fuel_costs/markets.py | 4 +- .../generators/core/commit/discrete.py | 2 +- switch_model/generators/core/dispatch.py | 18 +-- switch_model/reporting/__init__.py | 13 +- switch_model/solve_scenarios.py | 119 ++++++++++-------- switch_model/upgrade/upgrade_2_0_0b4.py | 16 +-- switch_model/utilities.py | 111 ++++++++-------- tests/upgrade_test.py | 3 +- 8 files changed, 144 insertions(+), 142 deletions(-) diff --git a/switch_model/energy_sources/fuel_costs/markets.py b/switch_model/energy_sources/fuel_costs/markets.py index e2e2e44d8..3b1c8ae90 100644 --- a/switch_model/energy_sources/fuel_costs/markets.py +++ b/switch_model/energy_sources/fuel_costs/markets.py @@ -75,8 +75,8 @@ def define_components(mod): ConsumeFuelTier[rfm, period, tier] is a decision variable that denotes the amount of fuel consumed in each tier of a supply curve - in a particular regional fuel market and period. It has an upper bound - of rfm_supply_tier_limit. + in a particular regional fuel market and period (MMBtu/year). It + has an upper bound of rfm_supply_tier_limit. 
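+
+    (Illustrative example, not taken from the model inputs: if tier 1 of a
+    market offers 100,000 MMBtu/year at $5/MMBtu and tier 2 is unlimited at
+    $8/MMBtu, cost minimization will fill tier 1 before drawing on tier 2.)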
FuelConsumptionInMarket[rfm, period] is a derived decision variable specifying the total amount of fuel consumed in a regional fuel diff --git a/switch_model/generators/core/commit/discrete.py b/switch_model/generators/core/commit/discrete.py index b22a97bad..8a2a3120e 100644 --- a/switch_model/generators/core/commit/discrete.py +++ b/switch_model/generators/core/commit/discrete.py @@ -10,7 +10,7 @@ dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\ 'switch_model.financials', 'switch_model.energy_sources.properties',\ - 'switch_model.generators.core.build', 'switch_model.investment.gen_discrete_build',\ + 'switch_model.generators.core.build',\ 'switch_model.generators.core.dispatch', 'switch_model.operations.unitcommit' def define_components(mod): diff --git a/switch_model/generators/core/dispatch.py b/switch_model/generators/core/dispatch.py index 5a72322ca..604cb7329 100644 --- a/switch_model/generators/core/dispatch.py +++ b/switch_model/generators/core/dispatch.py @@ -155,19 +155,13 @@ def period_active_gen_rule(m, period): mod.GENS_IN_PERIOD = Set(mod.PERIODS, initialize=period_active_gen_rule, doc="The set of projects active in a given period.") - def TPS_FOR_GEN_rule(m, gen): - if not hasattr(m, '_TPS_FOR_GEN_dict'): - m._TPS_FOR_GEN_dict = collections.defaultdict(set) - for (_gen, period) in m.GEN_PERIODS: - for t in m.TPS_IN_PERIOD[period]: - m._TPS_FOR_GEN_dict[_gen].add(t) - result = m._TPS_FOR_GEN_dict.pop(gen) - if len(m._TPS_FOR_GEN_dict) == 0: - delattr(m, '_TPS_FOR_GEN_dict') - return result mod.TPS_FOR_GEN = Set( - mod.GENERATION_PROJECTS, within=mod.TIMEPOINTS, - rule=TPS_FOR_GEN_rule) + mod.GENERATION_PROJECTS, + within=mod.TIMEPOINTS, + rule=lambda m, g: ( + tp for p in m.PERIODS_FOR_GEN[g] for tp in m.TPS_IN_PERIOD[p] + ) + ) def TPS_FOR_GEN_IN_PERIOD_rule(m, gen, period): if not hasattr(m, '_TPS_FOR_GEN_IN_PERIOD_dict'): diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 4ea4bf209..2ffdad93a 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -8,7 +8,7 @@ Modules within this directory may implement custom exports that depend on multiple Switch modules. Each individual Switch module that defines components should only access model components that -it defined or that were defined upstream in Switch modules that +it defined or that were defined upstream in Switch modules that it depends on. For example, the load_zone module cannot assume whether users will be including project.no_commit or project.unitcommit, so it cannot reference model components defined in either of those files. However, @@ -65,7 +65,7 @@ def format_row(row): else: row[i] = sig_digits.format(v) return tuple(row) - + try: w.writerows( format_row(row=values(instance, *unpack_elements(x))) @@ -76,7 +76,7 @@ def format_row(row): w.writerows( # TODO: flatten x (unpack tuples) like Pyomo before calling values() # That may cause problems elsewhere though... - + format_row(row=values(instance, *x)) for x in itertools.product(*indexes) ) @@ -141,17 +141,14 @@ def _save_generic_results(instance, outdir, sorted_output): def _save_total_cost_value(instance, outdir): - values = instance.Minimize_System_Cost.values() - assert len(values) == 1 - total_cost = values[0].expr() with open(os.path.join(outdir, 'total_cost.txt'), 'w') as fh: - fh.write('%s\n' % total_cost) + fh.write('{}\n'.format(value(instance.SystemCost))) def post_solve(instance, outdir): """ Minimum output generation for all model runs. 
- + """ _save_generic_results(instance, outdir, instance.options.sorted_output) _save_total_cost_value(instance, outdir) diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index adbb44f8c..011e557ee 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -2,19 +2,19 @@ # Copyright (c) 2015-2017 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. -"""Scenario management module. -Reads scenario-related arguments from the command line and the same options -file that solve.py would read, and uses them to setup scenarios_to_run(). -For each scenario, this generator yields a tokenized list of arguments that -define that scenario (similar to sys.argv, but based on a line from a scenario -definition file, followed by any options specified on the command line). +"""Scenario management module. +Reads scenario-related arguments from the command line and the same options +file that solve.py would read, and uses them to setup scenarios_to_run(). +For each scenario, this generator yields a tokenized list of arguments that +define that scenario (similar to sys.argv, but based on a line from a scenario +definition file, followed by any options specified on the command line). Then it calls solve.main() with this list of arguments (once for each scenario). A queueing system (based on lock directories within a queue directory) is used to -ensure that scenarios_to_run() will always return the next unsolved +ensure that scenarios_to_run() will always return the next unsolved scenario from the scenario list file, even if the file is edited while this script is running. This makes it possible to amend the scenario list while -long solver jobs are running. Multiple solver scripts can also use +long solver jobs are running. Multiple solver scripts can also use scenarios_to_run() in separate processes to select the next job to run. """ @@ -53,39 +53,48 @@ scenario_queue_dir = scenario_manager_args.scenario_queue job_id = scenario_manager_args.job_id -# Make a best effort to get a unique, persistent job_id for each job. -# This is used to clear the queue of running tasks if a task is stopped and -# restarted. (would be better if other jobs could do this when this job dies -# but it's hard to see how they can detect when this job fails.) -# (The idea is that the user will run multiple jobs in parallel, with one -# thread per job, to process all the scenarios. These might be run in separate -# terminal windows, or in separate instances of gnu screen, or as numbered -# jobs on an HPC system. Sometimes a job will get interrupted, e.g., if the -# user presses ctrl-c in a terminal window or if the job is launched on an -# interruptible queue. This script attempts to detect when that job gets -# relaunched, and re-run the interrupted scenario.) -if job_id is None: - job_id = os.environ.get('JOB_ID') # could be set by user -if job_id is None: - job_id = os.environ.get('JOBID') # could be set by user -if job_id is None: - job_id = os.environ.get('SLURM_JOBID') -if job_id is None: - job_id = os.environ.get('OMPI_MCA_ess_base_jobid') -if job_id is None: - # construct one from hostname and parent's pid - # this way, each job launched from a different terminal window - # or different instance of gnu screen will have a persistent ID - # (This won't work on Windows before Python 3.2; in that case, - # users should specify a --job-id or set an environment variable - # when running multiple jobs in parallel. 
Without that, all - # jobs will think they have the same ID, and at startup they will - # try to re-run the scenario currently being run by some other job.) - if hasattr(os, 'getppid'): - job_id = socket.gethostname() + '_' + str(os.getppid()) - else: - # won't be able to automatically clear previously interrupted job - job_id = socket.gethostname() + '_' + str(os.getpid()) +# Get a unique job id. Note: in the past we have tried to get a +# persistent ID for each parallel task, so that it could requeue any +# jobs it was previously working on when it restarted. However, this +# is kludgy to begin with (only works if restarted under similar conditions) +# and tends to fail. It also cannot work with a bare "srun" or "mpirun" +# command, which might launch 20+ tasks that end up thinking they're the +# same job and race to reset the queue. +job_id = socket.gethostname() + '_' + str(os.getpid()) + +# # Make a best effort to get a unique, persistent job_id for each job. +# # This is used to clear the queue of running tasks if a task is stopped and +# # restarted. (would be better if other jobs could do this when this job dies +# # but it's hard to see how they can detect when this job fails.) +# # (The idea is that the user will run multiple jobs in parallel, with one +# # thread per job, to process all the scenarios. These might be run in separate +# # terminal windows, or in separate instances of gnu screen, or as numbered +# # jobs on an HPC system. Sometimes a job will get interrupted, e.g., if the +# # user presses ctrl-c in a terminal window or if the job is launched on an +# # interruptible queue. This script attempts to detect when that job gets +# # relaunched, and re-run the interrupted scenario.) +# if job_id is None: +# job_id = os.environ.get('JOB_ID') # could be set by user +# if job_id is None: +# job_id = os.environ.get('JOBID') # could be set by user +# if job_id is None: +# job_id = os.environ.get('SLURM_JOBID') +# if job_id is None: +# job_id = os.environ.get('OMPI_MCA_ess_base_jobid') +# if job_id is None: +# # construct one from hostname and parent's pid +# # this way, each job launched from a different terminal window +# # or different instance of gnu screen will have a persistent ID +# # (This won't work on Windows before Python 3.2; in that case, +# # users should specify a --job-id or set an environment variable +# # when running multiple jobs in parallel. Without that, all +# # jobs will think they have the same ID, and at startup they will +# # try to re-run the scenario currently being run by some other job.) +# if hasattr(os, 'getppid'): +# job_id = socket.gethostname() + '_' + str(os.getppid()) +# else: +# # won't be able to automatically clear previously interrupted job +# job_id = socket.gethostname() + '_' + str(os.getpid()) running_scenarios_file = os.path.join(scenario_queue_dir, job_id+"_running.txt") @@ -101,11 +110,11 @@ def main(args=None): os.makedirs(scenario_queue_dir) except OSError: pass # directory probably exists already - + # remove lock directories for any scenarios that were # previously being solved by this job but were interrupted unlock_running_scenarios() - + for (scenario_name, args) in scenarios_to_run(): print( "\n\n=======================================================================\n" @@ -126,15 +135,15 @@ def main(args=None): def scenarios_to_run(): """Generator function which returns argument lists for each scenario that should be run. 
- + Note: each time a new scenario is required, this re-reads the scenario_list file and then returns the first scenario that hasn't already started running. - This allows multiple copies of the script to be run and allocate scenarios among + This allows multiple copies of the script to be run and allocate scenarios among themselves.""" - + skipped = [] ran = [] - + if requested_scenarios: # user requested one or more scenarios # just run them in the order specified, with no queue-management @@ -150,12 +159,12 @@ def scenarios_to_run(): # Run every scenario in the list, with queue management # This is done by repeatedly scanning the scenario list and choosing # the first scenario that hasn't been run. This way, users can edit the - # list and this script will adapt to the changes as soon as it finishes + # list and this script will adapt to the changes as soon as it finishes # the current scenario. all_done = False while not all_done: all_done = True - # cache a list of scenarios that have been run, to avoid trying to checkout every one. + # cache a list of scenarios that have been run, to avoid trying to checkout every one. # This list is found by retrieving the names of the lock-directories. already_run = filter(os.path.isdir, os.listdir(".")) for scenario_name, base_args in get_scenario_dict().items(): @@ -178,7 +187,7 @@ def scenarios_to_run(): "run these scenarios again. (rm -rf {sq})".format(sq=scenario_queue_dir) ) return - + def parse_arg(arg, args=sys.argv[1:], **parse_kw): """Parse one argument from the argument list, using options as specified for argparse""" @@ -202,7 +211,7 @@ def get_scenario_dict(): with open(scenario_list_file, 'r') as f: scenario_list_text = [r.strip() for r in f.read().splitlines()] scenario_list_text = [r for r in scenario_list_text if r and not r.startswith("#")] - + # note: text.splitlines() omits newlines and ignores presence/absence of \n at end of the text # shlex.split() breaks an command-line-style argument string into a list like sys.argv scenario_list = [shlex.split(r) for r in scenario_list_text] @@ -236,9 +245,9 @@ def mark_completed(scenario_name): # remove the scenario from the list of running scenarios (since it's been completed now) running_scenarios.remove(scenario_name) write_running_scenarios_file() - # note: the scenario lock directory is left in place so the scenario won't get checked + # note: the scenario lock directory is left in place so the scenario won't get checked # out again - + def write_running_scenarios_file(): # write the list of scenarios currently being run by this job to disk # so they can be released back to the queue if the job is interrupted and restarted @@ -255,7 +264,7 @@ def write_running_scenarios_file(): f.truncate() else: # remove the running_scenarios_file entirely if it would be empty - try: + try: os.remove(running_scenarios_file) except OSError as e: if e.errno != 2: # no such file @@ -268,7 +277,7 @@ def unlock_running_scenarios(): with open(running_scenarios_file) as f: interrupted = f.read().splitlines() for scenario_name in interrupted: - try: + try: os.rmdir(os.path.join(scenario_queue_dir, scenario_name)) except OSError as e: if e.errno != 2: # no such file diff --git a/switch_model/upgrade/upgrade_2_0_0b4.py b/switch_model/upgrade/upgrade_2_0_0b4.py index 22b83d7de..f3048bac8 100644 --- a/switch_model/upgrade/upgrade_2_0_0b4.py +++ b/switch_model/upgrade/upgrade_2_0_0b4.py @@ -25,19 +25,19 @@ def rename_file(old_name, new_name, optional_file=True): if optional_file and not os.path.isfile(old_path): 
return shutil.move(old_path, new_path) - + def rename_column(file_name, old_col_name, new_col_name, optional_file=True): path = os.path.join(inputs_dir, file_name) if optional_file and not os.path.isfile(path): return df = pandas.read_csv(path, na_values=['.'], sep='\t') df.rename(columns={old_col_name: new_col_name}, inplace=True) - df.to_csv(path, sep='\t', na_rep='.', index=False) - + df.to_csv(path, sep='\t', na_rep='.', index=False) + old_new_column_names_in_file = { 'gen_inc_heat_rates.tab': [('project', 'GENERATION_PROJECT')] } - + for fname, old_new_pairs in old_new_column_names_in_file.iteritems(): for old, new in old_new_pairs: rename_column(fname, old_col_name=old, new_col_name=new) @@ -47,10 +47,12 @@ def rename_column(file_name, old_col_name, new_col_name, optional_file=True): trans_opt_path = os.path.join(inputs_dir, 'trans_optional_params.tab') if os.path.isfile(trans_lines_path) and os.path.isfile(trans_lines_path): trans_lines = pandas.read_csv(trans_lines_path, na_values=['.'], sep='\t') - trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep='\t') - trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') + if os.path.isfile(trans_opt_path): + trans_opt = pandas.read_csv(trans_opt_path, na_values=['.'], sep='\t') + trans_lines = trans_lines.merge(trans_opt, on='TRANSMISSION_LINE', how='left') trans_lines.to_csv(trans_lines_path, sep='\t', na_rep='.', index=False) - os.remove(trans_opt_path) + if os.path.isfile(trans_opt_path): + os.remove(trans_opt_path) # Write a new version text file. switch_model.upgrade._write_input_version(inputs_dir, upgrades_to) diff --git a/switch_model/utilities.py b/switch_model/utilities.py index aeaa41792..68f971772 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -36,7 +36,7 @@ def create_model(module_list, args=sys.argv[1:]): This is implemented as calling the following functions for each module that has them defined: - + define_dynamic_lists(model): Add lists to the model that other modules can register with. Used for power balance equations, cost components of the objective function, etc. @@ -44,13 +44,13 @@ def create_model(module_list, args=sys.argv[1:]): define_components(model): Add components to the model object (parameters, sets, decisions variables, expressions, and/or constraints). Also register with relevant dynamic_lists. - + define_dynamic_components(model): Add dynamic components to the model that depend on the contents of dyanmics lists. Power balance constraints and the objective function are defined in this manner. - + See financials and balancing.load_zones for examples of dynamic definitions. - + All modules can request access to command line parameters and set their default values for those options. If this codebase is being used more like a library than a stand-alone executable, this behavior can cause problems. @@ -83,7 +83,7 @@ def create_model(module_list, args=sys.argv[1:]): if hasattr(module, 'define_arguments'): module.define_arguments(argparser) model.options = argparser.parse_args(args) - + # Define model components for module in model.get_modules(): if hasattr(module, 'define_dynamic_lists'): @@ -142,52 +142,46 @@ def save_inputs_as_dat(model, instance, save_path="inputs/complete_inputs.dat", tools that have not been fully integrated with DataPortal. 
SYNOPSIS: save_inputs_as_dat(model, instance, save_path) - - """ # helper function to convert values to strings, # putting quotes around values that start as strings quote_str = lambda v: '"{}"'.format(v) if isinstance(v, basestring) else '{}'.format(str(v)) - + # helper function to create delimited lists from single items or iterables of any data type + from switch_model.reporting import make_iterable + join_space = lambda items: ' '.join(map(str, make_iterable(items))) # space-separated list + join_comma = lambda items: ','.join(map(str, make_iterable(items))) # comma-separated list + with open(save_path, "w") as f: for component_name in instance.DataPortal.data(): if component_name in exclude: - continue # don't write data for components in exclude list + continue # don't write data for components in exclude list # (they're in scenario-specific files) component = getattr(model, component_name) comp_class = type(component).__name__ component_data = instance.DataPortal.data(name=component_name) if comp_class == 'SimpleSet' or comp_class == 'OrderedSimpleSet': - f.write("set " + component_name + " := ") - f.write(' '.join(map(str, component_data))) # space-separated list - f.write(";\n") + f.write( + "set {} := {};\n" + .format(component_name, join_space(component_data)) + ) elif comp_class == 'IndexedParam': - if len(component_data) > 0: # omit components for which no data were provided - f.write("param " + component_name + " := ") - if component.index_set().dimen == 1: - f.write(' '.join(str(key) + " " + quote_str(value) - for key,value in component_data.iteritems())) - else: - f.write("\n") - for key,value in (sorted(component_data.iteritems()) - if sorted_output - else component_data.iteritems()): - f.write(" " + - ' '.join(map(str, key)) + " " + - quote_str(value) + "\n") + if component_data: # omit components for which no data were provided + f.write("param {} := \n".format(component_name)) + for key, value in ( + sorted(iteritems(component_data)) + if sorted_output + else iteritems(component_data) + ): + f.write(" {} {}\n".format(join_space(key), quote_str(value))) f.write(";\n") elif comp_class == 'SimpleParam': - f.write("param " + component_name + " := " + str(component_data) + ";\n") + f.write("param {} := {};\n".format(component_name, component_data)) elif comp_class == 'IndexedSet': - # raise RuntimeError( - # "Error with IndexedSet {}. Support for .dat export is not tested.". - # format(component_name)) - # print "Warning: exporting IndexedSet {}, but code has not been tested.".format( - # component_name) - for key in component_data: # note: key is always a tuple - f.write("set " + component_name + "[" + ",".join(map(str, key)) + "] := ") - f.write(' '.join(map(str, component_data[key]))) # space-separated list - f.write(";\n") + for key, vals in iteritems(component_data): + f.write( + "set {}[{}] := {};\n" + .format(component_name, join_comma(key), join_space(vals)) + ) else: raise ValueError( "Error! Component type {} not recognized for model element '{}'.". @@ -204,18 +198,18 @@ def pre_solve(instance, outputs_dir=None): def post_solve(instance, outputs_dir=None): """ - Call post-solve function (if present) in all modules used to compose this model. + Call post-solve function (if present) in all modules used to compose this model. This function can be used to report or save results from the solved model. 
""" if outputs_dir is None: outputs_dir = getattr(instance.options, "outputs_dir", "outputs") if not os.path.exists(outputs_dir): os.makedirs(outputs_dir) - + # TODO: implement a check to call post solve functions only if # solver termination condition is not 'infeasible' or 'unknown' # (the latter may occur when there are problems with licenses, etc) - + for module in instance.get_modules(): if hasattr(module, 'post_solve'): module.post_solve(instance, outputs_dir) @@ -298,16 +292,12 @@ def _add_min_data_check(model): def has_discrete_variables(model): - for variable in model.component_objects(Var, active=True): - if variable.is_indexed(): - for v in variable.itervalues(): - if v.is_binary() or v.is_integer(): - return True - else: - if v.is_binary() or v.is_integer(): - return True - return False - + all_elements = lambda v: v.itervalues() if v.is_indexed() else [v] + return any( + v.is_binary() or v.is_integer() + for variable in model.component_objects(Var, active=True) + for v in all_elements(variable) + ) def check_mandatory_components(model, *mandatory_model_components): """ @@ -422,17 +412,26 @@ def load_aug(switch_data, optional=False, auto_select=False, The name load_aug() is not great and may be changed. """ + # TODO: + # Allow user to specify filename when defining parameters and sets. + # Also allow user to specify the name(s) of the column(s) in each set. + # Then use those automatically to pull data from the right file (and to + # write correct index column names in the generic output files). + # This will simplify code and ease comprehension (user can see + # immediately where the data come from for each component). This can + # also support auto-documenting of parameters and input files. + path = kwds['filename'] # Skip if the file is missing if optional and not os.path.isfile(path): return # If this is a .dat file, then skip the rest of this fancy business; we'll - # only check if the file is missing and optional for .dat files. + # only check if the file is missing and optional for .tab files. filename, extension = os.path.splitext(path) if extension == '.dat': switch_data.load(**kwds) return - + # copy the optional_params to avoid side-effects when the list is altered below optional_params=list(optional_params) # Parse header and first row @@ -518,6 +517,7 @@ def load_aug(switch_data, optional=False, auto_select=False, for (i, p_i) in del_items: del kwds['select'][i] del kwds['param'][p_i] + if optional and file_has_no_data_rows: # Skip the file. Note that we are only doing this after having # validated the file's column headings. @@ -527,10 +527,10 @@ def load_aug(switch_data, optional=False, auto_select=False, switch_data.load(**kwds) -# Define an argument parser that accepts the allow_abbrev flag to +# Define an argument parser that accepts the allow_abbrev flag to # prevent partial matches, even on versions of Python before 3.5. 
# See https://bugs.python.org/issue14910 -# This is needed because the parser may sometimes be called with only a subset +# This is needed because the parser may sometimes be called with only a subset # of the eventual argument list (e.g., to parse module-related arguments before # loading the modules and adding their arguments to the list), and without this # flag, the parser could match arguments that are meant to be used later @@ -540,7 +540,7 @@ def load_aug(switch_data, optional=False, auto_select=False, if sys.version_info >= (3, 5): _ArgumentParser = argparse.ArgumentParser else: - # patch ArgumentParser to accept the allow_abbrev flag + # patch ArgumentParser to accept the allow_abbrev flag # (works on Python 2.7 and maybe others) class _ArgumentParser(argparse.ArgumentParser): def __init__(self, *args, **kwargs): @@ -578,7 +578,7 @@ class Logging: def __init__(self, logs_dir): # Make logs directory if class is initialized if not os.path.exists(logs_dir): - os.mkdir(logs_dir) + os.makedirs(logs_dir) # Assign sys.stdout and a log file as locations to write to self.terminal = sys.stdout @@ -603,10 +603,9 @@ def flush(self): def iteritems(obj): - """ Iterator of key, value pairs for obj; + """ Iterator of key, value pairs for obj; equivalent to obj.items() on Python 3+ and obj.iteritems() on Python 2 """ try: return obj.iteritems() except AttributeError: # Python 3+ return obj.items() - diff --git a/tests/upgrade_test.py b/tests/upgrade_test.py index dced11df1..7ca609673 100644 --- a/tests/upgrade_test.py +++ b/tests/upgrade_test.py @@ -62,7 +62,8 @@ def test_upgrade(): '--outputs-dir', upgrade_dir_outputs]) total_cost = read_file(os.path.join(upgrade_dir_outputs, 'total_cost.txt')) finally: - sys.path.remove(upgrade_dir) + if upgrade_dir in sys.path: # code above may have failed before appending + sys.path.remove(upgrade_dir) _remove_temp_dir(temp_dir) expectation_file = get_expectation_path(example_dir) if UPDATE_EXPECTATIONS: From f0037687e9493f779f9f1edc8531622525420d6f Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 21:35:35 -1000 Subject: [PATCH 26/51] rename '_FUEL_BASED_GEN_TPS' to 'FUEL_BASED_GEN_TPS' (old version looks like an editing error) --- .../generators/core/commit/fuel_use.py | 2 +- switch_model/generators/core/dispatch.py | 36 ++++----- switch_model/generators/core/no_commit.py | 17 ++--- switch_model/hawaii/rps.py | 76 +++++++++---------- switch_model/policies/rps_simple.py | 2 +- 5 files changed, 66 insertions(+), 67 deletions(-) diff --git a/switch_model/generators/core/commit/fuel_use.py b/switch_model/generators/core/commit/fuel_use.py index 396c27288..a72b5ff60 100644 --- a/switch_model/generators/core/commit/fuel_use.py +++ b/switch_model/generators/core/commit/fuel_use.py @@ -117,7 +117,7 @@ def FUEL_USE_SEGMENTS_FOR_GEN_default_rule(m, g): dimen=4, initialize=lambda m: [ (g, t, intercept, slope) - for (g, t) in m._FUEL_BASED_GEN_TPS + for (g, t) in m.FUEL_BASED_GEN_TPS for (intercept, slope) in m.FUEL_USE_SEGMENTS_FOR_GEN[g] ] ) diff --git a/switch_model/generators/core/dispatch.py b/switch_model/generators/core/dispatch.py index 604cb7329..59fad8805 100644 --- a/switch_model/generators/core/dispatch.py +++ b/switch_model/generators/core/dispatch.py @@ -30,13 +30,13 @@ def define_components(mod): they can be dispatched. A dispatch decisions is made for each member of this set. Members of this set can be abbreviated as (g, t) or (g, t). - - TPS_FOR_GEN[g] is a set array showing all timepoints when a - project is active. 
These are the timepoints corresponding to - PERIODS_FOR_GEN. This is the same data as GEN_TPS, + + TPS_FOR_GEN[g] is a set array showing all timepoints when a + project is active. These are the timepoints corresponding to + PERIODS_FOR_GEN. This is the same data as GEN_TPS, but split into separate sets for each project. - TPS_FOR_GEN_IN_PERIOD[g, period] is the same as + TPS_FOR_GEN_IN_PERIOD[g, period] is the same as TPS_FOR_GEN, but broken down by period. Periods when the project is inactive will yield an empty set. @@ -96,13 +96,13 @@ def define_components(mod): in $base_year/hour in the future period (rather than Net Present Value). - _FUEL_BASED_GEN_TPS is a subset of GEN_TPS - showing all times when fuel-consuming projects could be dispatched + FUEL_BASED_GEN_TPS is a subset of GEN_TPS + showing all times when fuel-consuming projects could be dispatched (used to identify timepoints when fuel use must match power production). GEN_TP_FUELS is a subset of GEN_TPS * FUELS, showing all the valid combinations of project, timepoint and fuel, - i.e., all the times when each project could consume a fuel that is + i.e., all the times when each project could consume a fuel that is limited, costly or produces emissions. GenFuelUseRate[(g, t, f) in GEN_TP_FUELS] is a @@ -123,7 +123,7 @@ def define_components(mod): fuel's upstream emissions, as well as Carbon Capture efficiency for generators that implement Carbon Capture and Sequestration. This does not yet support multi-fuel generators. - + AnnualEmissions[p in PERIODS]:The system's annual emissions, in metric tonnes of CO2 per year. @@ -156,7 +156,7 @@ def period_active_gen_rule(m, period): doc="The set of projects active in a given period.") mod.TPS_FOR_GEN = Set( - mod.GENERATION_PROJECTS, + mod.GENERATION_PROJECTS, within=mod.TIMEPOINTS, rule=lambda m, g: ( tp for p in m.PERIODS_FOR_GEN[g] for tp in m.TPS_IN_PERIOD[p] @@ -175,33 +175,33 @@ def TPS_FOR_GEN_IN_PERIOD_rule(m, gen, period): if len(m._TPS_FOR_GEN_IN_PERIOD_dict) == 0: delattr(m, '_TPS_FOR_GEN_IN_PERIOD_dict') return result - mod.TPS_FOR_GEN_IN_PERIOD = Set(mod.GENERATION_PROJECTS, mod.PERIODS, + mod.TPS_FOR_GEN_IN_PERIOD = Set(mod.GENERATION_PROJECTS, mod.PERIODS, within=mod.TIMEPOINTS, rule=TPS_FOR_GEN_IN_PERIOD_rule) mod.GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) - for g in m.GENERATION_PROJECTS + (g, tp) + for g in m.GENERATION_PROJECTS for tp in m.TPS_FOR_GEN[g])) mod.VARIABLE_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) + (g, tp) for g in m.VARIABLE_GENS for tp in m.TPS_FOR_GEN[g])) - mod._FUEL_BASED_GEN_TPS = Set( + mod.FUEL_BASED_GEN_TPS = Set( dimen=2, initialize=lambda m: ( - (g, tp) + (g, tp) for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN[g])) mod.GEN_TP_FUELS = Set( dimen=3, initialize=lambda m: ( - (g, t, f) - for (g, t) in m._FUEL_BASED_GEN_TPS + (g, t, f) + for (g, t) in m.FUEL_BASED_GEN_TPS for f in m.FUELS_FOR_GEN[g])) mod.GenCapacityInTP = Expression( diff --git a/switch_model/generators/core/no_commit.py b/switch_model/generators/core/no_commit.py index 701ad48eb..18a83fbb5 100644 --- a/switch_model/generators/core/no_commit.py +++ b/switch_model/generators/core/no_commit.py @@ -56,21 +56,21 @@ def define_components(mod): """ - # NOTE: DispatchBaseloadByPeriod should eventually be replaced by + # NOTE: DispatchBaseloadByPeriod should eventually be replaced by # an "ActiveCapacityDuringPeriod" decision variable that applies to all # projects. 
This should be constrained - # based on the amount of installed capacity each period, and then + # based on the amount of installed capacity each period, and then # DispatchUpperLimit and DispatchLowerLimit should be calculated - # relative to ActiveCapacityDuringPeriod. Fixed O&M (but not capital + # relative to ActiveCapacityDuringPeriod. Fixed O&M (but not capital # costs) should be calculated based on ActiveCapacityDuringPeriod. # This would allow mothballing (and possibly restarting) projects. # Choose flat operating level for baseload plants during each period # (not necessarily running all available capacity) - # Note: this is unconstrained, because other constraints limit project + # Note: this is unconstrained, because other constraints limit project # dispatch during each timepoint and therefore the level of this variable. mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GENS, mod.PERIODS) - + def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: return (m.GenCapacityInTP[g, t] * m.gen_availability[g] * @@ -83,7 +83,7 @@ def DispatchUpperLimit_expr(m, g, t): mod.Enforce_Dispatch_Baseload_Flat = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: + rule=lambda m, g, t: (m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) if g in m.BASELOAD_GENS else Constraint.Skip) @@ -94,8 +94,7 @@ def DispatchUpperLimit_expr(m, g, t): m.DispatchGen[g, t] <= m.DispatchUpperLimit[g, t])) mod.GenFuelUseRate_Calculate = Constraint( - mod._FUEL_BASED_GEN_TPS, + mod.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: ( - sum(m.GenFuelUseRate[g, t, f] - for f in m.FUELS_FOR_GEN[g]) + sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) == m.DispatchGen[g, t] * m.gen_full_load_heat_rate[g])) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 19a8eb207..2a8bfbda5 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -202,7 +202,7 @@ def simple_DispatchGenRenewableMW(m): # fuel usage and the full load heat rate. This also allows use of # multiple fuels in the same project at the same time. 
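    # A hedged numeric illustration (values hypothetical, not model data):
    # if a plant dispatches 100 MW while burning 900 MMBtu/h of a renewable
    # fuel and 300 MMBtu/h of a fossil fuel, its renewable fuel fraction is
    # 900 / (900 + 300) = 0.75, so the expression below would credit
    # 0.75 * 100 = 75 MW toward the RPS (assuming the allocation is
    # proportional to fuel use, as the comments above suggest).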
m.DispatchGenRenewableMW = Expression( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: sum( m.GenFuelUseRate[g, t, f] @@ -231,30 +231,30 @@ def split_commit_DispatchGenRenewableMW(m): # can't get committed in the 100% RPS due to non-zero min loads) # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # a portion of every startup and shutdown must be designated as renewable - m.CommitGenRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.CommitGenRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.CommitGenRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp] ) - m.StartupGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) - m.ShutdownGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.ShutdownGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.ShutdownGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.ShutdownGenCapacityRenewable[g, tp] <= m.ShutdownGenCapacity[g, tp] ) # chain commitments, startup and shutdown for renewables m.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.CommitGenRenewable[g, m.tp_previous[tp]] + m.StartupGenCapacityRenewable[g, tp] @@ -263,13 +263,13 @@ def split_commit_DispatchGenRenewableMW(m): ) # must use committed capacity for renewable production m.Enforce_Dispatch_Upper_Limit_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.CommitGenRenewable[g, tp] ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) >= @@ -313,20 +313,20 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # is 100%. 
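    # note: this is one of several alternative formulations of
    # DispatchGenRenewableMW defined in this module; presumably only the
    # formulation matching the allocation method chosen in define_arguments
    # is actually invoked, so the reuse of component names across these
    # functions does not conflict.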
# count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) - m.StartupGenCapacityRenewable = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m._FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) >= @@ -441,10 +441,10 @@ def rule(m, g, tp): return m.DispatchGen[g, tp] else: return 0.0 - m.DispatchGenRenewableMW = Expression(m._FUEL_BASED_GEN_TPS, rule=rule) + m.DispatchGenRenewableMW = Expression(m.FUEL_BASED_GEN_TPS, rule=rule) else: m.DispatchGenRenewableMW = Expression( - m._FUEL_BASED_GEN_TPS, within=NonNegativeReals, + m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, rule=lambda m, g, tp: 0.0 ) @@ -462,7 +462,7 @@ def binary_by_period_DispatchGenRenewableMW(m): m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set(dimen=2, initialize=lambda m: { (g, pe) for g in m.FUEL_BASED_GENS for pe in m.PERIODS - if (g, m.TPS_IN_PERIOD[pe].first()) in m._FUEL_BASED_GEN_TPS + if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS }) # choose whether to run (only) on renewable fuels during each period @@ -491,17 +491,17 @@ def rule(m, g, p): ) # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= @@ -532,7 +532,7 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set(dimen=2, initialize=lambda m: { (g, ts) for g in m.FUEL_BASED_GENS for ts in m.TIMESERIES - if (g, m.TPS_IN_TS[ts].first()) in m._FUEL_BASED_GEN_TPS + if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS }) # choose whether to run (only) on renewable fuels during each period @@ -550,17 +550,17 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): ) # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag 
= Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= @@ -588,19 +588,19 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): def advanced2_DispatchGenRenewableMW(m): # choose whether to run (only) on renewable fuels during each timepoint - m.DispatchRenewableFlag = Var(m._FUEL_BASED_GEN_TPS, within=Binary) + m.DispatchRenewableFlag = Var(m.FUEL_BASED_GEN_TPS, within=Binary) # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= @@ -626,7 +626,7 @@ def advanced1_DispatchGenRenewableMW(m): m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS, within=NonNegativeReals) # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == @@ -636,7 +636,7 @@ def advanced1_DispatchGenRenewableMW(m): # choose a single fuel to use during each timestep m.DispatchFuelFlag = Var(m.GEN_TP_FUELS, within=Binary) m.DispatchFuelFlag_Total = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: sum(m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == @@ -692,21 +692,21 @@ def advanced1_DispatchGenRenewableMW(m): def quadratic_DispatchGenRenewableMW(m): # choose how much power to obtain from renewables during each timepoint - m.DispatchRenewableFraction = Var(m._FUEL_BASED_GEN_TPS, within=PercentFraction) + m.DispatchRenewableFraction = Var(m.FUEL_BASED_GEN_TPS, within=PercentFraction) # count amount of renewable power produced from project - m.DispatchGenRenewableMW = Var(m._FUEL_BASED_GEN_TPS, within=NonNegativeReals) + m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) # don't overcount renewable power production m.Set_DispatchRenewableFraction = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp] ) m.Enforce_DispatchRenewableFraction = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: sum( m.GenFuelUseRate[g, tp, f] @@ -727,7 +727,7 @@ def quadratic1_DispatchGenRenewableMW(m): # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( - m._FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == diff --git a/switch_model/policies/rps_simple.py b/switch_model/policies/rps_simple.py index 0c6e06ead..32ffd1e19 100644 --- a/switch_model/policies/rps_simple.py +++ b/switch_model/policies/rps_simple.py @@ -37,7 +37,7 @@ def define_components(mod): rps_target[p in RPS_PERIODS] is the fraction of total generated energy in a period that has to be provided by RPS-elegible sources. 
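    For example (illustrative values only), rps_target = 0.40 for a period
    would require at least 40% of that period's total generation to come
    from RPS-eligible sources.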
- RPSProjFuelPower[g, t in _FUEL_BASED_GEN_TPS] is an + RPSProjFuelPower[g, t in FUEL_BASED_GEN_TPS] is an expression summarizing the power generated by RPS-elegible fuels in every fuel-based project. This cannot be simply taken to be equal to the dispatch level of the project, since a mix of RPS-elegible and unelegible From d57cccfb8685ab2d09eb6572066acff82f438a46 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 21:43:53 -1000 Subject: [PATCH 27/51] Update Hawaii modules to work with spinning_reserves_advanced module --- switch_model/hawaii/hydrogen.py | 79 ++++++++++++--- .../hawaii/register_hi_storage_reserves.py | 16 +-- switch_model/hawaii/save_results.py | 97 ++++++++++++++++++- switch_model/hawaii/scenario_data.py | 76 ++++++++------- switch_model/hawaii/smooth_dispatch.py | 11 +++ 5 files changed, 215 insertions(+), 64 deletions(-) diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index b5bd05e4a..e23e18c14 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -2,6 +2,13 @@ from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf +def define_arguments(argparser): + argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " + "Specify 'none' to disable." + ) + def define_components(m): # electrolyzer details @@ -185,22 +192,62 @@ def define_components(m): m.Cost_Components_Per_Period.append('HydrogenFixedCostAnnual') # Register with spinning reserves if it is available - if 'Spinning_Reserve_Up_Provisions' in dir(m): - m.HydrogenSpinningReserveUp = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.HydrogenSlackUp[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') - - m.HydrogenSpinningReserveDown = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.HydrogenSlackDown[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) - ) - m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ['none']: + # Register with spinning reserves + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): + # calculate available slack from hydrogen equipment + m.HydrogenSlackUpForArea = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: + sum(m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + ) + m.HydrogenSlackDownForArea = Expression( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: + sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) + ) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # using advanced formulation, index by reserve type, balancing area, timepoint + # define variables for each type of reserves to be provided + # choose how to allocate the slack between the different reserve products + m.HYDROGEN_SPINNING_RESERVE_TYPES = Set( + initialize=m.options.hydrogen_reserve_types + ) + m.HydrogenSpinningReserveUp = Var( + m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + m.HydrogenSpinningReserveDown = Var( + m.HYDROGEN_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS, + within=NonNegativeReals + ) + # constrain reserve provision within available slack + m.Limit_HydrogenSpinningReserveUp = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + 
m.HydrogenSpinningReserveUp[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) <= m.HydrogenSlackUpForArea[ba, tp] + ) + m.Limit_HydrogenSpinningReserveDown = Constraint( + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: + sum( + m.HydrogenSpinningReserveDown[rt, ba, tp] + for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES + ) <= m.HydrogenSlackDownForArea[ba, tp] + ) + m.Spinning_Reserve_Up_Provisions.append('HydrogenSpinningReserveUp') + m.Spinning_Reserve_Down_Provisions.append('HydrogenSpinningReserveDown') + else: + # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint + if m.options.hydrogen_reserve_types != ['spinning']: + raise ValueError( + 'Unable to use reserve types other than "spinning" with simple spinning reserves module.' + ) + m.Spinning_Reserve_Up_Provisions.append('HydrogenSlackUpForArea') + m.Spinning_Reserve_Down_Provisions.append('HydrogenSlackDownForArea') def load_inputs(mod, switch_data, inputs_dir): diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py index f5f6876a9..611d20c3a 100644 --- a/switch_model/hawaii/register_hi_storage_reserves.py +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -7,15 +7,16 @@ # TODO: use standard reserves module for this # note: this is modeled off of hawaii.reserves, to avoid adding lots of -# reserve-related code to the pumped storage and hydrogen modules. +# reserve-related code to the pumped storage and (formerly) hydrogen modules. # But eventually those modules should use the standard storage module and # extend that as needed. def define_arguments(argparser): argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], help= - "Type(s) of reserves to provide from hydrogen and/or pumped-hydro storage " - "(e.g., 'contingency' or 'regulation'). " + "Type(s) of reserves to provide from " # hydrogen and/or + "pumped-hydro storage " + "(e.g., 'contingency regulation'). " "Default is generic 'spinning'. Specify 'none' to disable." ) @@ -55,8 +56,9 @@ def define_components(m): # calculate available slack from hawaii storage def up_expr(m, a, tp): avail = 0.0 - if hasattr(m, 'HydrogenSlackUp'): - avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + # now handled in hydrogen module: + # if hasattr(m, 'HydrogenSlackUp'): + # avail += sum(m.HydrogenSlackUp[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) if hasattr(m, 'PumpedStorageSpinningUpReserves'): avail += sum( m.PumpedStorageSpinningUpReserves[phg, tp] @@ -67,8 +69,8 @@ def up_expr(m, a, tp): m.HawaiiStorageSlackUp = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=up_expr) def down_expr(m, a, tp): avail = 0.0 - if hasattr(m, 'HydrogenSlackDown'): - avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) + # if hasattr(m, 'HydrogenSlackDown'): + # avail += sum(m.HydrogenSlackDown[z, tp] for z in m.ZONES_IN_BALANCING_AREA[a]) if hasattr(m, 'PumpedStorageSpinningDownReserves'): avail += sum( m.PumpedStorageSpinningDownReserves[phg, tp] diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 95eb38b90..60645be82 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -125,8 +125,13 @@ def DispatchGenByFuel(m, g, tp, fuel): project.no_commit, not project.unitcommit.fuel_use. 
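    (Under commitment, a fuel's share of dispatch would be total dispatch
    times that fuel's fraction of total fuel use, i.e., a ratio of decision
    variables.)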
In the unit commitment version it can only be defined as a quadratically constrained variable, which we don't want to force on all users.""" - dispatch = value(m.DispatchGen[g, tp]) if (g, tp) in m.DispatchGen else 0.0 - total_fuel = value(sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g])) + if (g, tp) in m.DispatchGen: + dispatch = value(m.DispatchGen[g, tp]) + total_fuel = value(sum(m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g])) + else: + dispatch = 0.0 + total_fuel = 0.0 + if dispatch == 0.0: result = 0.0 elif total_fuel == 0.0: @@ -149,6 +154,25 @@ def write_results(m, outputs_dir): values=lambda m: summary_values(m) ) + if hasattr(m, 'Spinning_Reserve_Up_Requirements'): + # pre-calculate amount of reserves provided and needed for each balancing area and timepoint + spinning_reserve_provisions = defaultdict(float) + spinning_reserve_requirements = defaultdict(float) + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + for component in m.Spinning_Reserve_Up_Provisions: + for (rt, ba, tp), val in getattr(m, component).items(): + spinning_reserve_provisions[ba, tp] += val + for component in m.Spinning_Reserve_Up_Requirements: + for (rt, ba, tp), val in getattr(m, component).items(): + spinning_reserve_requirements[ba, tp] += val + else: # basic module + for component in m.Spinning_Reserve_Up_Provisions: + for (ba, tp), val in getattr(m, component).items(): + spinning_reserve_provisions[ba, tp] += val + for component in m.Spinning_Reserve_Up_Requirements: + for (ba, tp), val in getattr(m, component).items(): + spinning_reserve_requirements[ba, tp] += val + # # write out results # util.write_table(m, m.TIMEPOINTS, # output_file=os.path.join(outputs_dir, "dispatch{t}.tsv".format(t=tag)), @@ -169,7 +193,8 @@ def write_results(m, outputs_dir): +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) +tuple(m.Zone_Power_Injections) +tuple(m.Zone_Power_Withdrawals) - +("marginal_cost","peak_day"), + +("spinning_reserve_provision", "spinning_reserve_requirement") + +("marginal_cost", "peak_day"), values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( @@ -198,11 +223,75 @@ def write_results(m, outputs_dir): ) +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) + +( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area + (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) + if hasattr(m, 'Spinning_Reserve_Up_Requirements') + else (0.0, 0.0) + ) +(util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t], # note: this uses 0.0 if no dual available, i.e., with glpk solver 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') ) - + + if hasattr(m, 'Spinning_Reserve_Up_Requirements') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + # write the reserve values + util.write_table( + m, m.BALANCING_AREAS, m.TIMEPOINTS, + output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.tsv".format(t=tag)), + headings= + ("balancing_area", "period", "timepoint_label") + +tuple(m.FUELS) + +tuple(m.NON_FUEL_ENERGY_SOURCES) + +tuple(m.Spinning_Reserve_Up_Provisions) + +tuple(m.Spinning_Reserve_Up_Requirements) + +tuple("marginal_cost_"+rt for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + +("peak_day",), + values=lambda m, ba, t: + (ba, m.tp_period[t], m.tp_timestamp[t]) + +tuple( + ( + sum( 
+ # total reserve production + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] + ) + # prorated by energy source used + * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] + for p in m.GENERATION_PROJECTS_BY_FUEL[f] + if (p, t) in m.GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + ) + ) + for f in m.FUELS + ) + +tuple( + sum( + m.CommitGenSpinningReservesUp[rt, p, t] + for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba + for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] + ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + +tuple( + sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + for component in m.Spinning_Reserve_Up_Provisions + ) + +tuple( + sum(util.get(getattr(m, component), (rt, ba, t), 0.0) for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) + for component in m.Spinning_Reserve_Up_Requirements + ) + +tuple( + util.get( + m.dual, + util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), + 0.0 # note: this uses 0.0 if no dual available, i.e., with glpk solver + ) / m.bring_timepoint_costs_to_base_year[t] + for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS + ) + +(('peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical'),) + ) + sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) util.write_table( m, m.TIMEPOINTS, diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 65f4c8349..cec179f48 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -479,44 +479,46 @@ def write_tables(**args): print "WARNING: and have not been updated to the scenario base year of {}.".format(args['base_financial_year']) ######################### - # spinning_reserves_advanced - # args['max_reserve_capability'] is a list of tuples of (technology, reserve_type) - # (assumed equivalent to 'regulation' if not specified) - # We unzip it to use with the unnest function (psycopg2 passes lists of tuples - # as arrays of tuples, and unnest would keeps those as tuples) - try: - reserve_technologies, reserve_types = map(list, zip(*args['max_reserve_capability'])) - except KeyError: - reserve_technologies, reserve_types = [], [] - res_args = args.copy() - res_args['reserve_technologies']=reserve_technologies - res_args['reserve_types']=reserve_types - - # note: casting is needed if the lists are empty; see https://stackoverflow.com/a/41893576/3830997 - write_table('generation_projects_reserve_capability.tab', """ - WITH reserve_capability (technology, reserve_type) as ( + # spinning_reserves_advanced (if wanted; otherwise defaults to just "spinning" + if 'max_reserve_capability' in args or args.get('write_generation_projects_reserve_capability', False): + + # args['max_reserve_capability'] is a list of tuples of (technology, reserve_type) + # (assumed equivalent to 'regulation' if not specified) + # We unzip it to use with the unnest function (psycopg2 passes lists of tuples + # as arrays of tuples, and unnest would keeps those as tuples) + try: + reserve_technologies, reserve_types = map(list, zip(*args['max_reserve_capability'])) + except KeyError: + reserve_technologies, reserve_types = [], [] + res_args = args.copy() + res_args['reserve_technologies']=reserve_technologies + res_args['reserve_types']=reserve_types + + # note: casting is needed if the lists are empty; see 
https://stackoverflow.com/a/41893576/3830997 + write_table('generation_projects_reserve_capability.tab', """ + WITH reserve_capability (technology, reserve_type) as ( + SELECT + UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, + UNNEST(%(reserve_types)s::varchar(20)[]) AS reserve_type + ), + reserve_types (rank, reserve_type) as ( + VALUES + (0, 'none'), + (1, 'contingency'), + (2, 'regulation') + ) SELECT - UNNEST(%(reserve_technologies)s::varchar(40)[]) AS technology, - UNNEST(%(reserve_types)s::varchar(20)[]) AS reserve_type - ), - reserve_types (rank, reserve_type) as ( - VALUES - (0, 'none'), - (1, 'contingency'), - (2, 'regulation') - ) - SELECT - p."GENERATION_PROJECT", - t2.reserve_type AS "SPINNING_RESERVE_TYPE" - FROM - study_projects p - LEFT JOIN reserve_capability c USING (technology) - LEFT JOIN reserve_types t1 USING (reserve_type) - JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100) - WHERE t2.rank > 0 - ORDER BY 1, t2.rank; - """, res_args) - + p."GENERATION_PROJECT", + t2.reserve_type AS "SPINNING_RESERVE_TYPE" + FROM + study_projects p + LEFT JOIN reserve_capability c USING (technology) + LEFT JOIN reserve_types t1 USING (reserve_type) + JOIN reserve_types t2 on t2.rank <= COALESCE(t1.rank, 100) + WHERE t2.rank > 0 + ORDER BY 1, t2.rank; + """, res_args) + ######################### # operation.unitcommit.fuel_use diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index 566276507..224ecb294 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -39,6 +39,17 @@ def Smooth_Free_Variables_obj_rule(m): print "Will smooth charging and discharging of standard storage." obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) + # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables + # (e.g., via storage) and (b) give better representation of the amount of reserves actually available + if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + print "Will maximize provision of up reserves." 
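+            # note: the surrounding objective is minimized, so subtracting a
+            # small multiple of the weighted reserve provision below rewards
+            # extra up reserves without overwhelming the smoothing terms above;
+            # the weights favor holding regulation over contingency reserves.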
+ reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + for comp_name in m.Spinning_Reserve_Up_Provisions: + component = getattr(m, comp_name) + obj += -0.1 * sum( + reserve_weight.get(rt, 1.0) * component[rt, ba, tp] + for rt, ba, tp in component + ) return obj m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) # leave standard objective in effect for now From 689f37e83f013fe895d33b1017ab970c25ead1f6 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 21:47:43 -1000 Subject: [PATCH 28/51] Allow linear (non-binary) relaxation of Kalaeloa operating variables --- switch_model/hawaii/kalaeloa.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/switch_model/hawaii/kalaeloa.py b/switch_model/hawaii/kalaeloa.py index 7ea611de4..70f765fdb 100644 --- a/switch_model/hawaii/kalaeloa.py +++ b/switch_model/hawaii/kalaeloa.py @@ -11,8 +11,6 @@ def define_components(m): # commit units 1 & 2, run each between 65 and 90 MW # run both 1 & 2 at 90 MW, and run 3 at 28 MW - more_than_kalaeloa_capacity = 220 # used for big-m constraints on individual units - m.KALAELOA_MAIN_UNITS = Set( initialize=["Oahu_Kalaeloa_CC1", "Oahu_Kalaeloa_CC2", "Kalaeloa_CC1", "Kalaeloa_CC2"], filter=lambda m, g: g in m.GENERATION_PROJECTS @@ -39,13 +37,14 @@ def define_components(m): ) # run kalaeloa at full power or not + # (if linearized, this is the fraction of capacity that is dispatched) m.RunKalaeloaUnitFull = Var(m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, within=Binary) - m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( + m.Run_Kalaeloa_Unit_Full_Enforce = Constraint( # big-m constraint m.KALAELOA_MAIN_UNIT_DISPATCH_POINTS, rule=lambda m, g, tp: m.DispatchGen[g, tp] - + (1 - m.RunKalaeloaUnitFull[g, tp]) * more_than_kalaeloa_capacity + + (1 - m.RunKalaeloaUnitFull[g, tp]) * m.gen_capacity_limit_mw[g] >= m.GenCapacityInTP[g, tp] * m.gen_availability[g] ) @@ -56,7 +55,7 @@ def define_components(m): rule=lambda m, g_duct, tp, g_main: m.DispatchGen[g_duct, tp] <= - m.RunKalaeloaUnitFull[g_main, tp] * more_than_kalaeloa_capacity + m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct] ) # force at least one Kalaeloa unit to run at full power at all times From 9f77124d629b4670b1a14321163d70673a408a43 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 21:49:16 -1000 Subject: [PATCH 29/51] Bug fixes and cosmetic changes in Hawaii package --- switch_model/hawaii/rps.py | 42 ++--- switch_model/hawaii/scenario_data.py | 236 ++++++++++++++------------- 2 files changed, 141 insertions(+), 137 deletions(-) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 2a8bfbda5..6842d533c 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -35,6 +35,19 @@ def define_arguments(argparser): help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " + "with unit commitment, full_load_heat_rate for models without." ) + +# TODO: make this work with progressive hedging as follows: +# add a variable indexed over all weather scenarios and all cost scenarios, +# which shows how much of the RPS will be allocated to each scenario. +# Problem: we multiply the RPS target by total generation, so this will become quadratic? +# May instead need to treat the RPS more like a limit on non-renewable production (as a fraction of loads)? +# Designate the allocations as a first-stage variable. +# Require each subproblem to work within its part of the allocation. 
Also require in each subproblem +# that the allocations across all weather scenarios (within each cost scenario) average out to match the +# actual target (when applying the scenario weights). +# Then PHA will force all the scenarios to agree on how the target is allocated among them. +# Could do the same with hydrogen storage: require average hydrogen stored across all scenarios +# to be less than the size of the storage built. def define_components(m): """ @@ -383,11 +396,10 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): dimen=3, initialize=lambda m: [ (g, tp, f) - for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 - for g in m.FUEL_BASED_GENS - if (g, m.TPS_IN_PERIOD[per].first()) in m.GEN_TPS - for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] - for tp in m.TPS_IN_PERIOD[per] + for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 + for g in m.FUEL_BASED_GENS if (g, per) in m.GEN_PERIODS + for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] + for tp in m.TPS_IN_PERIOD[per] ] ) m.No_Fossil_Fuel_With_Full_RPS = Constraint( @@ -395,26 +407,6 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 ) - # only count biofuels toward RPS - # prevent use of non-renewable fuels during renewable timepoints - def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): - if m.f_rps_eligible[f]: - return Constraint.Skip - else: - # harder to read like this, but having all numerical values on the right hand side - # facilitates analysis of duals and reduced costs - # note: we also add a little slack to avoid having this be the main constraint - # on total output from any power plant (that also clarifies dual analysis) - big_fuel = 1.01 * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - return ( - m.GenFuelUseRate[g, tp, f] - + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel - <= - big_fuel - ) - m.Enforce_DispatchRenewableFlag = Constraint( - m.GEN_TP_FUELS, rule=Enforce_DispatchRenewableFlag_rule - ) def fuel_switch_at_high_rps_DispatchGenRenewableMW(m): """ switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold """ diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index cec179f48..cf7319760 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -1,9 +1,9 @@ # TODO: make this get data from the redr server via an HTTP api instead of psycopg2, as follows: # create a .rpy script on the redr server that can accept form data (the args dict) via POST -# and then return a .zip file containing all the files created by write_tables (most of the +# and then return a .zip file containing all the files created by write_tables (most of the # code in this module would go into that script). This can create the files as text blobs and -# then collect them into a single .zip file using the zip module +# then collect them into a single .zip file using the zip module # Info on zipping multiple files together in memory: https://stackoverflow.com/a/25194850/3830997 # See here for info on .rpy files: # https://twistedmatrix.com/documents/15.0.0/web/howto/using-twistedweb.html#web-howto-using-twistedweb-rpys @@ -12,9 +12,9 @@ # client side will then just send a POST request with the args dictionary (probably using the # requests module), receive back a zip file with all the relevant CSVs (probably with a whole -# relative directory structure). 
Client may also need to convert line endings (or unzip may do +# relative directory structure). Client may also need to convert line endings (or unzip may do # it automatically). -# See here for info on sending a Python dict as the body in a +# See here for info on sending a Python dict as the body in a # POST request: https://stackoverflow.com/a/14804320/3830997 # https://stackoverflow.com/questions/15694120/why-does-http-post-request-body-need-to-be-json-enconded-in-python # https://stackoverflow.com/questions/35212279/python-request-post-not-accepting-dictionary @@ -28,11 +28,11 @@ from textwrap import dedent from switch_model import __version__ as switch_version -# NOTE: instead of using the python csv writer, this directly writes tables to +# NOTE: instead of using the python csv writer, this directly writes tables to # file in the pyomo .tab format. This uses tabs between columns and the standard # line break for the system it is run on. This does the following translations (only): # - If a value contains double quotes, they get doubled. -# - If a value contains a single quote, tab or space character, the value gets enclosed in double quotes. +# - If a value contains a single quote, tab or space character, the value gets enclosed in double quotes. # (Note that pyomo doesn't allow quoting (and therefore spaces) in column headers.) # - null values are converted to . (the pyomo/ampl standard for missing data) # - any other values are simply passed to str(). @@ -44,38 +44,38 @@ # NOTE: ANSI SQL specifies single quotes for literal strings, and postgres conforms # to this, so all the queries below should use single quotes around strings. -# NOTE: write_table() will automatically convert null values to '.', +# NOTE: write_table() will automatically convert null values to '.', # so pyomo will recognize them as missing data # NOTE: the code below could be made more generic, e.g., a list of # table names and queries, which are then processed at the end. -# But that would be harder to debug, and wouldn't allow for ad hoc +# But that would be harder to debug, and wouldn't allow for ad hoc # calculations or writing .dat files (which are used for a few parameters) def write_tables(**args): - # TODO: any arguments that are defined with default values below (args.get()) could + # TODO: any arguments that are defined with default values below (args.get()) could # have those default values assigned here. Then they can be used directly in queries # instead of using them to create snippets that are used in the queries. This would # also document the available arguments a little better. 
- + # catch obsolete arguments (otherwise they would be silently ignored) if 'ev_scen_id' in args: raise ValueError("ev_scen_id argument is no longer supported; use ev_scenario instead.") - + # write version marker file with open(make_file_path('switch_inputs_version.txt', args), 'w') as f: f.write(switch_version) - + ######################### # timescales - + # reusable clause to calculate the length of each period # If this is within 1% of an integer number of years, it rounds to the integer, # to allow for weights that add up to 365 or 365.25 days per year with_period_length = """ WITH period_length as ( - SELECT + SELECT period, -- note: for some reason modulo doesn't work on real values in postgresql CASE WHEN mod((sum(ts_scale_to_period)/365.25)::numeric, 1) BETWEEN -0.01 and 0.01 @@ -90,16 +90,16 @@ def write_tables(**args): GROUP BY 1 ) """ - + # note: in contrast to earlier versions, this makes period_end - # point to the exact moment when the period finishes + # point to the exact moment when the period finishes # (switch_model.timescales can handle that now), - # and it lets period_end be a floating point number + # and it lets period_end be a floating point number # (postgresql will export it with a .0 in this case) # note: despite the comments above, this rounded period_end to # the nearest whole number until 2018-02-17. This was removed to # support fractional years for monthly batches in production-cost models. - write_table('periods.tab', + write_table('periods.tab', with_period_length + """ SELECT p.period AS "INVESTMENT_PERIOD", p.period as period_start, @@ -110,7 +110,7 @@ def write_tables(**args): """, args) write_table('timeseries.tab', """ - SELECT study_date as "TIMESERIES", period as ts_period, + SELECT study_date as "TIMESERIES", period as ts_period, ts_duration_of_tp, ts_num_tps, ts_scale_to_period FROM study_date WHERE time_sample = %(time_sample)s @@ -118,10 +118,10 @@ def write_tables(**args): """, args) write_table('timepoints.tab', """ - SELECT h.study_hour as timepoint_id, + SELECT h.study_hour as timepoint_id, to_char(date_time + (period - extract(year from date_time)) * interval '1 year', 'YYYY-MM-DD-HH24:MI') as timestamp, - h.study_date as timeseries + h.study_date as timeseries FROM study_hour h JOIN study_date d USING (study_date, time_sample) WHERE h.time_sample = %(time_sample)s ORDER BY period, extract(doy from date), study_hour; @@ -141,7 +141,7 @@ def write_tables(**args): del cur ######################### - # create temporary tables that can be referenced by other queries + # create temporary tables that can be referenced by other queries # to identify available projects and technologies db_cursor().execute(""" DROP TABLE IF EXISTS study_length; @@ -152,25 +152,25 @@ def write_tables(**args): DROP TABLE IF EXISTS study_projects; CREATE TEMPORARY TABLE study_projects AS - SELECT DISTINCT + SELECT DISTINCT CONCAT_WS('_', load_zone, p.technology, nullif(site, 'na'), nullif(orientation, 'na')) AS "GENERATION_PROJECT", - p.* + p.* FROM project p JOIN generator_info g USING (technology) CROSS JOIN study_length -- existing projects still in use during the study LEFT JOIN proj_existing_builds e ON ( e.project_id = p.project_id - AND e.build_year + g.max_age_years > study_start + AND e.build_year + g.max_age_years > study_start AND e.build_year < study_end ) -- projects that could be built during the study LEFT JOIN generator_costs_by_year c ON ( c.cap_cost_scen_id = %(cap_cost_scen_id)s - AND c.technology = g.technology - AND (g.min_vintage_year IS NULL 
OR c.year >= g.min_vintage_year) - AND c.year >= study_start + AND c.technology = g.technology + AND (g.min_vintage_year IS NULL OR c.year >= g.min_vintage_year) + AND c.year >= study_start AND c.year < study_end ) WHERE (e.project_id IS NOT NULL OR c.technology IS NOT NULL) @@ -179,9 +179,9 @@ def write_tables(**args): DROP TABLE IF EXISTS study_generator_info; CREATE TEMPORARY TABLE study_generator_info AS - SELECT DISTINCT g.* + SELECT DISTINCT g.* FROM generator_info g JOIN study_projects p USING (technology); - """.format(with_period_length), args) + """.format(with_period_length), args) ######################### # financials @@ -201,7 +201,7 @@ def write_tables(**args): # existing_local_td, local_td_annual_cost_per_mw write_table('load_zones.tab', """ SELECT load_zone as "LOAD_ZONE" - FROM load_zone + FROM load_zone WHERE load_zone in %(load_zones)s """, args) @@ -210,15 +210,15 @@ def write_tables(**args): # get system loads, scaled from the historical years to the model years # note: 'offset' is a keyword in postgresql, so we use double-quotes to specify the column name write_table('loads.tab', """ - SELECT - l.load_zone AS "LOAD_ZONE", + SELECT + l.load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", - system_load * scale + "offset" AS zone_demand_mw - FROM study_date d + GREATEST(0, system_load * scale + "offset") AS zone_demand_mw + FROM study_date d JOIN study_hour h USING (time_sample, study_date) JOIN system_load l USING (date_time) JOIN system_load_scale s ON ( - s.load_zone = l.load_zone + s.load_zone = l.load_zone AND s.year_hist = extract(year from l.date_time) AND s.year_fore = d.period) WHERE l.load_zone in %(load_zones)s @@ -229,7 +229,7 @@ def write_tables(**args): ######################### # fuels - + write_table('non_fuel_energy_sources.tab', """ SELECT DISTINCT fuel AS "NON_FUEL_ENERGY_SOURCES" FROM study_generator_info @@ -245,10 +245,10 @@ def write_tables(**args): ######################### # rps targets - + write_tab_file( - 'rps_targets.tab', - headers=('year', 'rps_target'), + 'rps_targets.tab', + headers=('year', 'rps_target'), data=[(y, args['rps_targets'][y]) for y in sorted(args['rps_targets'].keys())], arguments=args ) @@ -256,9 +256,9 @@ def write_tables(**args): ######################### # fuel_markets - # deflate HECO fuel scenarios to base year, and inflate EIA-based scenarios + # deflate HECO fuel scenarios to base year, and inflate EIA-based scenarios # from 2013 (forecast base year) to model base year. (ugh) - # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, + # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, # and base year, and possibly inflation rate. 
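    # A hedged numeric illustration (values hypothetical): with
    # inflation_rate = 0.025 and base_financial_year = 2017, a price quoted
    # in 2013 dollars would be scaled by (1 + 0.025) ** (2017 - 2013),
    # about 1.104, to reach the model's base year.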
if args['fuel_scen_id'] in ('1', '2', '3'): # no base_year specified; these are in nominal dollars @@ -282,7 +282,7 @@ def write_tables(**args): else: lng_selector = "false" - write_table('fuel_cost.tab', + write_table('fuel_cost.tab', with_period_length + """ SELECT load_zone, fuel_type as fuel, p.period, avg(price_mmbtu * {inflator} + COALESCE(fixed_cost, 0.00)) as fuel_cost @@ -299,19 +299,19 @@ def write_tables(**args): # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) write_table('regional_fuel_markets.tab', """ - SELECT DISTINCT concat('Hawaii_', fuel_type) AS regional_fuel_market, fuel_type AS fuel + SELECT DISTINCT concat('Hawaii_', fuel_type) AS regional_fuel_market, fuel_type AS fuel FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) - write_table('fuel_supply_curves.tab', + write_table('fuel_supply_curves.tab', with_period_length + """ - SELECT concat('Hawaii_', fuel_type) as regional_fuel_market, - fuel_type as fuel, - tier, - p.period, - avg(price_mmbtu * {inflator}) as unit_cost, - avg(max_avail_at_cost) as max_avail_at_cost, + SELECT concat('Hawaii_', fuel_type) as regional_fuel_market, + fuel_type as fuel, + tier, + p.period, + avg(price_mmbtu * {inflator}) as unit_cost, + avg(max_avail_at_cost) as max_avail_at_cost, avg(fixed_cost) as fixed_cost, avg(max_age) as max_age FROM fuel_costs c, study_periods p JOIN period_length l USING (period) @@ -324,8 +324,8 @@ def write_tables(**args): """.format(inflator=inflator), args) write_table('zone_to_regional_fuel_market.tab', """ - SELECT DISTINCT load_zone, concat('Hawaii_', fuel_type) AS regional_fuel_market - FROM fuel_costs + SELECT DISTINCT load_zone, concat('Hawaii_', fuel_type) AS regional_fuel_market + FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) @@ -342,7 +342,7 @@ def write_tables(**args): # we report the fuel as 'multiple' and then provide data in a multi-fuel table. # Some of these are actually single-fuel, but this approach is simpler than sorting # them out within each query, and it doesn't add any complexity to the model. - + if args.get('connect_cost_per_mw_km', 0): print( "WARNING: ignoring connect_cost_per_mw_km specified in arguments; using" @@ -397,10 +397,13 @@ def write_tables(**args): baseload as gen_is_baseload, -- 0 as gen_is_flexible_baseload, cogen as gen_is_cogen, - non_cycling as gen_non_cycling, + -- non_cycling as gen_non_cycling, variable_o_m * 1000.0 AS gen_variable_om, CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN fuel ELSE 'multiple' END AS gen_energy_source, CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN null ELSE {flhr} END AS gen_full_load_heat_rate, + min_uptime as gen_min_uptime, + min_downtime as gen_min_downtime, + startup_energy / unit_size as gen_startup_fuel, gen_storage_efficiency, gen_storage_energy_to_power_ratio, gen_storage_max_cycles_per_year @@ -420,10 +423,10 @@ def write_tables(**args): # NOTE: these costs must be expressed in $/MW, $/MWh or $/MW-year, # not $/kW, $/kWh or $/kW-year. - # NOTE: for now, we only specify storage costs per unit of power, not + # NOTE: for now, we only specify storage costs per unit of power, not # on per unit of energy, so we insert $0 as the energy cost here. 
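    # (With the energy cost pinned at $0, the entire storage cost is carried
    # by the $/MW power-capacity term; presumably workable here because each
    # storage technology has a fixed gen_storage_energy_to_power_ratio.)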
# NOTE: projects should have NULL for overnight cost and fixed O&M in - # proj_existing_builds if they have an entry for the same year in + # proj_existing_builds if they have an entry for the same year in # generator_costs_by_year. If they have costs in both, they will both # get passed through to the data table, and Switch will raise an error # (as it should, because costs are ambiguous in this case). @@ -435,7 +438,7 @@ def write_tables(**args): c.capital_cost_per_kw * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) AS gen_overnight_cost, - CASE WHEN i.gen_storage_efficiency IS NULL THEN NULL ELSE 0.0 END + CASE WHEN i.gen_storage_efficiency IS NULL THEN NULL ELSE 0.0 END AS gen_storage_energy_overnight_cost, i.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) AS gen_fixed_o_m, @@ -448,10 +451,12 @@ def write_tables(**args): SELECT -- costs specified in proj_existing_builds "GENERATION_PROJECT", b.build_year, - SUM(b.proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * SUM(b.proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_overnight_cost, null AS gen_storage_energy_overnight_cost, - SUM(b.proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * SUM(b.proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) AS gen_fixed_om FROM study_projects p JOIN proj_existing_builds b USING (project_id) @@ -459,9 +464,9 @@ def write_tables(**args): GROUP BY 1, 2 UNION SELECT -- costs specified in generator_costs_by_year - "GENERATION_PROJECT", c.build_year, gen_overnight_cost, + "GENERATION_PROJECT", c.build_year, gen_overnight_cost, gen_storage_energy_overnight_cost, gen_fixed_o_m - FROM study_projects proj + FROM study_projects proj JOIN gen_build_costs c USING (technology) LEFT JOIN study_periods per ON (per.time_sample = %(time_sample)s AND c.build_year = per.period) LEFT JOIN proj_existing_builds e ON (e.project_id = proj.project_id AND e.build_year = c.build_year) @@ -474,10 +479,6 @@ def write_tables(**args): ORDER BY 1, 2; """, args) - if args['base_financial_year'] != 2016: - print "WARNING: capital costs for existing plants were stored in the database with a 2016 base year" - print "WARNING: and have not been updated to the scenario base year of {}.".format(args['base_financial_year']) - ######################### # spinning_reserves_advanced (if wanted; otherwise defaults to just "spinning" if 'max_reserve_capability' in args or args.get('write_generation_projects_reserve_capability', False): @@ -526,7 +527,7 @@ def write_tables(**args): # get part load heat rate curves if requested # note: we sort lexicographically by power output and fuel consumption, in case # there are segments where power or fuel consumption steps up while the other stays constant - # That is nonconvex and not currently supported by SWITCH, but could potentially be used + # That is nonconvex and not currently supported by SWITCH, but could potentially be used # in the future by assigning binary variables for activating each segment. 
# note: for sqlite, you could use "CONCAT(technology, ' ', output_mw, ' ', fuel_consumption_mmbtu_per_h) AS key" # TODO: rename fuel_consumption_mmbtu_per_h to fuel_use_mmbtu_per_h here and in import_data.py @@ -534,10 +535,10 @@ def write_tables(**args): if args.get('use_incremental_heat_rates', False): write_table('gen_inc_heat_rates.tab', """ WITH part_load AS ( - SELECT + SELECT row_number() OVER (ORDER BY technology, output_mw, fuel_consumption_mmbtu_per_h) AS key, technology, - output_mw, + output_mw, fuel_consumption_mmbtu_per_h FROM part_load_fuel_consumption JOIN study_generator_info USING (technology) ), prior AS ( @@ -547,17 +548,17 @@ def write_tables(**args): ), curves AS ( SELECT -- first step in each curve key, technology, - output_mw AS power_start_mw, - NULL::real AS power_end_mw, + output_mw AS power_start_mw, + NULL::real AS power_end_mw, NULL::real AS incremental_heat_rate_mbtu_per_mwhr, fuel_consumption_mmbtu_per_h AS fuel_use_rate_mmbtu_per_h FROM part_load LEFT JOIN prior USING (key) WHERE prior_key IS NULL UNION SELECT -- additional steps high.key AS key, high.technology, - low.output_mw AS power_start_mw, + low.output_mw AS power_start_mw, high.output_mw AS power_end_mw, - (high.fuel_consumption_mmbtu_per_h - low.fuel_consumption_mmbtu_per_h) + (high.fuel_consumption_mmbtu_per_h - low.fuel_consumption_mmbtu_per_h) / (high.output_mw - low.output_mw) AS incremental_heat_rate_mbtu_per_mwhr, NULL::real AS fuel_use_rate_mmbtu_per_h FROM part_load high JOIN prior USING (key) JOIN part_load low ON (low.key = prior.prior_key) @@ -570,9 +571,9 @@ def write_tables(**args): FROM curves c JOIN study_projects p using (technology) ORDER BY c.technology, c.key, p."GENERATION_PROJECT"; """, args) - + # This gets a list of all the fueled projects (listed as "multiple" energy sources above), - # and lists them as accepting any equivalent or lighter fuel. (However, cogen plants and plants + # and lists them as accepting any equivalent or lighter fuel. (However, cogen plants and plants # using fuels with rank 0 are not changed.) 
Fuels are also filtered against the list of fuels with # costs reported for the current scenario, so this can end up re-mapping one fuel in the database # (e.g., LSFO) to a similar fuel in the scenario (e.g., LSFO-Diesel-Blend), even if the original fuel @@ -610,11 +611,11 @@ def write_tables(**args): print "SKIPPING variable_capacity_factors.tab" else: write_table('variable_capacity_factors.tab', """ - SELECT + SELECT "GENERATION_PROJECT", study_hour as timepoint, cap_factor as gen_max_capacity_factor - FROM study_generator_info g + FROM study_generator_info g JOIN study_projects p USING (technology) JOIN cap_factor c USING (project_id) JOIN study_hour h using (date_time) @@ -643,7 +644,7 @@ def write_tables(**args): SELECT * FROM ( SELECT "GENERATION_PROJECT", study_hour AS "TIMEPOINT", - CASE WHEN %(enable_must_run)s = 1 AND must_run = 1 THEN 1.0 ELSE null END + CASE WHEN %(enable_must_run)s = 1 AND must_run = 1 THEN 1.0 ELSE null END AS gen_min_commit_fraction, null AS gen_max_commit_fraction, null AS gen_min_load_fraction_TP @@ -667,26 +668,26 @@ def write_tables(**args): # trans_build # --- Not used --- - # + # # write_table('trans_lines.tab', """ - # SELECT load_area_start AS load_zone_start, load_area_end AS load_zone_end, + # SELECT load_area_start AS load_zone_start, load_area_end AS load_zone_end, # tid, length_km AS transmission_length_km, efficiency AS transmission_efficiency, - # existing_mw_from AS existing_transmission_from, - # existing_mw_to AS existing_transmission_to - # FROM trans_line + # existing_mw_from AS existing_transmission_from, + # existing_mw_to AS existing_transmission_to + # FROM trans_line # WHERE load_area_start IN %(load_zones)s OR load_area_end IN %(load_zones)s # """, args) - # - # - # + # + # + # ######################### # trans_dispatch # --- Not used --- - + ######################### # batteries - # (now included as standard storage projects, but kept here + # (now included as standard storage projects, but kept here # to support older projects that haven't upgraded yet) bat_years = 'BATTERY_CAPITAL_COST_YEARS' bat_cost = 'battery_capital_cost_per_mwh_capacity_by_year' @@ -711,8 +712,8 @@ def write_tables(**args): # print "ev_scenario:", args.get('ev_scenario', None) if args.get('ev_scenario', None) is not None: write_table('ev_fleet_info.tab', """ - SELECT load_zone as "LOAD_ZONE", period as "PERIOD", - ev_share, ice_miles_per_gallon, ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, + SELECT load_zone as "LOAD_ZONE", period as "PERIOD", + ev_share, ice_miles_per_gallon, ev_miles_per_kwh, ev_extra_cost_per_vehicle_year, n_all_vehicles, vmt_per_vehicle FROM ev_adoption a JOIN study_periods p on a.year = p.period WHERE load_zone in %(load_zones)s @@ -723,26 +724,26 @@ def write_tables(**args): # power consumption for each hour of the day under business-as-usual charging # note: the charge weights have a mean value of 1.0, but go up and down in different hours write_table('ev_bau_load.tab', """ - SELECT - load_zone AS "LOAD_ZONE", + SELECT + load_zone AS "LOAD_ZONE", study_hour AS "TIMEPOINT", charge_weight * ev_share * n_all_vehicles * vmt_per_vehicle / (1000.0 * ev_miles_per_kwh) / 8760 as ev_bau_mw FROM ev_adoption e JOIN study_date d ON d.period = e.year - JOIN study_hour h USING (study_date, time_sample) - JOIN ev_hourly_charge_profile p + JOIN study_hour h USING (study_date, time_sample) + JOIN ev_hourly_charge_profile p ON p.hour_of_day = h.hour_of_day WHERE load_zone in %(load_zones)s AND time_sample = %(time_sample)s AND ev_scenario = 
%(ev_scenario)s ORDER BY 1, 2; """, args) - + ######################### # pumped hydro # TODO: put these data in a database with hydro_scen_id's and pull them from there - + if "pumped_hydro_headers" in args: write_tab_file( 'pumped_hydro.tab', @@ -766,12 +767,23 @@ def write_tables(**args): args ) + + ######################### + # PHA data + pha_params = sorted([k for k in args if k.startswith('pha_')]) + if pha_params: + write_dat_file( + 'pha.dat', + pha_params, + args + ) + # the two functions below could be used as the start of a system -# to write placeholder files for any files in the current scenario +# to write placeholder files for any files in the current scenario # that match the base files. This could be used to avoid creating large -# files (variable_cap_factor.tab) for alternative scenarios that are -# otherwise very similar. i.e., placeholder .tab or .dat files could -# be written with just the line 'include ../variable_cap_factor.tab' or +# files (variable_cap_factor.tab) for alternative scenarios that are +# otherwise very similar. i.e., placeholder .tab or .dat files could +# be written with just the line 'include ../variable_cap_factor.tab' or # 'include ../financial.dat'. def any_alt_args_in_list(args, l): @@ -780,13 +792,13 @@ def any_alt_args_in_list(args, l): if a in l: return True return False - + def any_alt_args_in_query(args, query): """Report whether any arguments in the args list appear in the list l.""" for a in args.get('alt_args', {}): if '%(' + a + ')s' in query: return True - return False + return False def make_file_path(file, args): """Create any directories and subdirectories needed to store data in the specified file, @@ -820,14 +832,14 @@ def db_cursor(): pghost='redr.eng.hawaii.edu' # note: the connection gets created when the module loads and never gets closed (until presumably python exits) con = psycopg2.connect(database='switch', host=pghost) #, user='switch_user') - + except psycopg2.OperationalError: print dedent(""" ############################################################################################ Error while connecting to switch database on postgres server {server}. Please ensure that the PGUSER environment variable is set with your postgres username - and there is a line like "*:*:*::" in ~/.pgpass (which should be chmod 0600) - or in %APPDATA%\postgresql\pgpass.conf (Windows). + and there is a line like "*:*:*::" in ~/.pgpass (which should be chmod 0600) + or in %APPDATA%\postgresql\pgpass.conf (Windows). See http://www.postgresql.org/docs/9.1/static/libpq-pgpass.html for more details. 
############################################################################################ """.format(server=pghost)) @@ -835,9 +847,9 @@ def db_cursor(): return con.cursor() def write_dat_file(output_file, args_to_write, arguments): - """ write a simple .dat file with the arguments specified in args_to_write, + """ write a simple .dat file with the arguments specified in args_to_write, drawn from the arguments dictionary""" - + if any(arg in arguments for arg in args_to_write): output_file = make_file_path(output_file, arguments) print "Writing {file} ...".format(file=output_file), @@ -846,10 +858,10 @@ def write_dat_file(output_file, args_to_write, arguments): with open(output_file, 'w') as f: f.writelines([ - 'param ' + name + ' := ' + str(arguments[name]) + ';\n' + 'param ' + name + ' := ' + str(arguments[name]) + ';\n' for name in args_to_write if name in arguments ]) - + print "time taken: {dur:.2f}s".format(dur=time.time()-start) def write_table(output_file, query, arguments): @@ -888,7 +900,7 @@ def write_tab_file(output_file, headers, data, arguments={}): def write_indexed_set_dat_file(output_file, set_name, query, arguments): """Write a .dat file defining an indexed set, based on the query provided. - + Note: the query should produce a table with index values in all columns except the last, and then set members for each index in the last column. (There should be multiple rows with the same values in the index columns.)""" @@ -901,7 +913,7 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): cur = db_cursor() cur.execute(dedent(query), arguments) - + # build a dictionary grouping all values (last column) according to their index keys (earlier columns) data_dict = collections.defaultdict(list) for r in cur: @@ -913,7 +925,7 @@ def write_indexed_set_dat_file(output_file, set_name, query, arguments): with open(output_file, 'w') as f: f.writelines([ 'set {sn}[{idx}] := {items} ;\n'.format( - sn=set_name, + sn=set_name, idx=', '.join(k), items=' '.join(v)) for k, v in data_dict.iteritems() From 4c0340eeb7e59fb9c16c97a09052197ee190ece9 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 22:30:30 -1000 Subject: [PATCH 30/51] Assign NonNegativeReals domain for contingency variables in spinning_reserves_advanced, so they can't go negative when no contingencies are registered. 
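Background, with a minimal Pyomo sketch (illustrative only, not part of the patch): a Var left in the default Reals domain is only bounded from below by the constraints that reference it. The contingency variables are normally pushed upward by the Calculate_MaximumContingency* constraints generated from the contingency lists, so when those lists are empty, nothing stops the solver from driving the variables negative to offset other costs.

    from pyomo.environ import ConcreteModel, Var, Objective, NonNegativeReals, minimize

    m = ConcreteModel()
    # With the default Reals domain and no registered contingency constraints,
    # a cost-minimizing solver can push this variable below zero without limit;
    # declaring within=NonNegativeReals keeps the intended floor of zero.
    m.MaximumContingencyUp = Var(within=NonNegativeReals)
    m.Cost = Objective(expr=100 * m.MaximumContingencyUp, sense=minimize)

That floor only binds in the no-contingency case, which is presumably why the example output total_cost.txt changes below.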
--- .../outputs/total_cost.txt | 2 +- .../spinning_reserves_advanced.py | 188 +++++++++--------- 2 files changed, 95 insertions(+), 95 deletions(-) diff --git a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt index c7c8c9c6b..2a19790fa 100644 --- a/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt +++ b/examples/production_cost_models/spinning_reserves_advanced/outputs/total_cost.txt @@ -1 +1 @@ -28580571.2082 +28606194.7452 diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index c7aefe353..3bd106149 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -18,7 +18,7 @@ 'switch_model.energy_sources.properties', 'switch_model.generators.core.build', 'switch_model.generators.core.dispatch', - 'switch_model.generators.core.commit.operate', + 'switch_model.generators.core.commit.operate', ) @@ -40,7 +40,7 @@ def define_arguments(argparser): "This can be used alone or in combination with the other " "contingency options.") ) - group.add_argument('--spinning-requirement-rule', default=None, + group.add_argument('--spinning-requirement-rule', default=None, choices = ["Hawaii", "3+5", "none"], help=("Choose rules for spinning reserves requirements as a function " "of variable renewable power and load. Hawaii uses rules " @@ -53,7 +53,7 @@ def define_arguments(argparser): # TODO: define these inputs in data files group.add_argument( '--contingency-reserve-type', dest='contingency_reserve_type', - default='spinning', + default='spinning', help= "Type of reserves to use to meet the contingency reserve requirements " "defined for generation projects and sometimes for loss-of-load events " @@ -61,14 +61,14 @@ def define_arguments(argparser): ) group.add_argument( '--regulating-reserve-type', dest='regulating_reserve_type', - default='spinning', + default='spinning', help= "Type of reserves to use to meet the regulating reserve requirements " "defined by the spinning requirements rule (e.g., 'spinning' or " "'regulation'); default is 'spinning'." ) - - + + def define_dynamic_lists(m): @@ -80,27 +80,27 @@ def define_dynamic_lists(m): 'spinning'. In other scenarios where some generators are limited in what kind of reserves they can provide, you may have "regulation" and "contingency" reserve products. - The dicts are setup as defaultdicts, so they will automatically - return an empty list if nothing has been added for a particular + The dicts are setup as defaultdicts, so they will automatically + return an empty list if nothing has been added for a particular type of reserves. - + Spinning_Reserve_Up_Requirements and Spinning_Reserve_Down_Requirements list model components that increase reserve requirements in each balancing - area and timepoint. - + area and timepoint. + Spinning_Reserve_Up_Provisions and Spinning_Reserve_Down_Provisions list model components that help satisfy spinning reserve requirements in - each balancing area and timepoint. - + each balancing area and timepoint. + Spinning_Reserve_Up_Contingencies and Spinning_Reserve_Down_Contingencies list model components describing maximum contingency events. 
Elements of this list are summarized into a MaximumContingency variable that is added to the Spinning_Reserve_Requirements['contingency'] list. - - Each component in the Requirements and Provisions lists needs to use units - of MW and be indexed by reserve type, balancing area and timepoint. Missing + + Each component in the Requirements and Provisions lists needs to use units + of MW and be indexed by reserve type, balancing area and timepoint. Missing entries will be treated as zero (no reserves required or no reserves available). - + Each component in the Contingencies list should be in MW and indexed by (ba, tp) in BALANCING_AREA_TIMEPOINTS. """
@@ -120,7 +120,7 @@ def gen_fixed_contingency(m): that is usually online and/or reserves are cheap). """ m.GenFixedContingency = Param( - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, initialize=lambda m: m.options.fixed_contingency ) m.Spinning_Reserve_Up_Contingencies.append('GenFixedContingency')
@@ -133,27 +133,27 @@ def gen_unit_contingency(m): specified. Caution: this adds binary variables to the model for every GEN_TPS for DISCRETELY_SIZED_GENS. This many binary variables can impact runtime. - + UNIT_CONTINGENCY_DISPATCH_POINTS is a subset of GEN_TPS for DISCRETELY_SIZED_GENS. - + GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a binary variable that tracks whether a generation project has at least one unit committed. - + Enforce_GenIsCommitted[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a constraint that enforces the tracking behavior of GenIsCommitted. - + GenUnitLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a variable that tracks the size of the largest contingency in each balancing area, accounting for all of the discretely sized units that are currently committed. This is added to the dynamic list Spinning_Reserve_Contingencies. - + Enforce_GenUnitLargestContingency[(g,t) in UNIT_CONTINGENCY_DISPATCH_POINTS] is a constraint that enforces the behavior of GenUnitLargestContingency, by making GenUnitLargestContingency >= the capacity of each of the committed units in its balancing area. - + """ # UNIT_CONTINGENCY_DISPATCH_POINTS duplicates # DISCRETE_GEN_TPS from generators.core.commit.discrete. I # justify the duplication because I don't think discrete unit commitment # should be a prerequisite for this functionality. m.UNIT_CONTINGENCY_DISPATCH_POINTS = Set( dimen=2, - initialize=lambda m: + initialize=lambda m: [(g, t) for g in m.DISCRETELY_SIZED_GENS for t in m.TPS_FOR_GEN[g]] ) m.GenIsCommitted = Var(
@@ -171,22 +171,22 @@ def gen_unit_contingency(m): m.UNIT_CONTINGENCY_DISPATCH_POINTS, ) m.Enforce_GenIsCommitted = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: m.CommitGen[g, tp] <= m.GenIsCommitted[g, tp] * ( - m._gen_max_cap_for_binary_constraints + m._gen_max_cap_for_binary_constraints if g not in m.CAPACITY_LIMITED_GENS else m.gen_capacity_limit_mw[g] ) ) - # TODO: would it be faster to add all generator contingencies directly + # TODO: would it be faster to add all generator contingencies directly # to Spinning_Reserve_Contingencies instead of introducing this intermediate # variable and constraint?
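    # (A sketch of that alternative, not implemented here and with hypothetical
    # names: entries in the contingency lists must be indexed by (ba, tp), so
    # each discretely sized generator would need its own registered expression,
    # e.g.
    #     for g in m.DISCRETELY_SIZED_GENS:
    #         name = 'GenUnitContingency_' + str(g)
    #         setattr(m, name, Expression(
    #             m.BALANCING_AREA_TIMEPOINTS,
    #             rule=lambda m, b, t, g=g:
    #                 m.GenIsCommitted[g, t] * m.gen_unit_size[g]
    #                 if (g, t) in m.UNIT_CONTINGENCY_DISPATCH_POINTS
    #                 and b == m.zone_balancing_area[m.gen_load_zone[g]]
    #                 else 0.0))
    #         m.Spinning_Reserve_Up_Contingencies.append(name)
    # That removes this intermediate variable but multiplies the entries behind
    # Calculate_MaximumContingencyUp, so it is not obviously faster.)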
m.GenUnitLargestContingency = Var( - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, doc="Largest generating unit that could drop offline.") def Enforce_GenUnitLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] return (m.GenUnitLargestContingency[b,t] >= m.GenIsCommitted[g, t] * m.gen_unit_size[g]) m.Enforce_GenUnitLargestContingency = Constraint( m.UNIT_CONTINGENCY_DISPATCH_POINTS,
@@ -204,19 +204,19 @@ def gen_project_contingency(m): units. This will model contingencies of entire generation projects - basically entire plants tripping offline, rather than individual generation units in a plant tripping offline. - + GenProjectLargestContingency[(b,t) in BALANCING_AREA_TIMEPOINTS] is a variable that tracks the size of the largest contingency in each balancing - area, accounting for all of the capacity that is committed. This is + area, accounting for all of the capacity that is committed. This is added to the dynamic list Spinning_Reserve_Contingencies. - + Enforce_GenProjectLargestContingency[(g,t) in GEN_TPS] is a constraint that enforces the behavior of GenProjectLargestContingency by making - GenProjectLargestContingency >= DispatchGen + GenProjectLargestContingency >= DispatchGen for each generation project in a balancing area. If a generation project is capable of providing upward reserves, then CommitGenSpinningReservesUp is added to the right hand side. - + """ m.GenProjectLargestContingency = Var( m.BALANCING_AREA_TIMEPOINTS,
@@ -225,7 +225,7 @@ def Enforce_GenProjectLargestContingency_rule(m, g, t): b = m.zone_balancing_area[m.gen_load_zone[g]] if g in m.SPINNING_RESERVE_CAPABLE_GENS: total_up_reserves = sum( - m.CommitGenSpinningReservesUp[rt, g, t] + m.CommitGenSpinningReservesUp[rt, g, t] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) return m.GenProjectLargestContingency[b, t] >= \ m.DispatchGen[g, t] + total_up_reserves
@@ -245,8 +245,8 @@ def hawaii_spinning_reserve_requirements(m): # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/source_data/ # reserve_requirements_oahu_scenarios charts.xlsx and # Dropbox/Research/Shared/Switch-Hawaii/ge_validation/ - # fit_renewable_reserves.ipynb ) - # TODO: supply all the parameters for this function in input files.
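    # (Illustrative numbers for the requirement defined below, made up for
    # orientation: a 30 MW wind project with var_gen_power_reserve = 1.0,
    # gen_max_capacity_factor = 0.4 and var_gen_cap_reserve_limit = 0.21 adds
    # 30 * min(1.0 * 0.4, 0.21) = 6.3 MW to the up-reserve requirement in its
    # balancing area for that timepoint.)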
# Calculate and register regulating reserve requirements # (currently only considers variable generation, only underforecasting)
@@ -267,17 +267,17 @@ def var_gen_cap_reserve_limit_default(m, g): "Unable to calculate reserve requirement for energy source {}".format(m.gen_energy_source[g]) ) m.var_gen_cap_reserve_limit = Param( - m.VARIABLE_GENS, + m.VARIABLE_GENS, default=var_gen_cap_reserve_limit_default, doc="Maximum spinning reserves required, as fraction of installed capacity" ) m.HawaiiVarGenUpSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, rt, b, t: sum( - m.GenCapacityInTP[g, t] + m.GenCapacityInTP[g, t] * min( - m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], + m.var_gen_power_reserve[g] * m.gen_max_capacity_factor[g, t], m.var_gen_cap_reserve_limit[g] ) for z in m.ZONES_IN_BALANCING_AREA[b]
@@ -320,16 +320,16 @@ def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): except AttributeError: load = m.zone_demand_mw return ( - 0.03 * sum(load[z, t] + 0.03 * sum(load[z, t] for z in m.LOAD_ZONES if b == m.zone_balancing_area[z]) - + 0.05 * sum(m.DispatchGen[g, t] + + 0.05 * sum(m.DispatchGen[g, t] for g in m.VARIABLE_GENS - if (g, t) in m.VARIABLE_GEN_TPS and + if (g, t) in m.VARIABLE_GEN_TPS and b == m.zone_balancing_area[m.gen_load_zone[g]])) m.NREL35VarGenSpinningReserveRequirement = Expression( [m.options.regulating_reserve_type], - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, rule=NREL35VarGenSpinningReserveRequirement_rule ) m.Spinning_Reserve_Up_Requirements.append('NREL35VarGenSpinningReserveRequirement')
@@ -338,37 +338,37 @@ def NREL35VarGenSpinningReserveRequirement_rule(m, rt, b, t): def define_components(m): """ - contingency_safety_factor is a parameter that increases the contingency + contingency_safety_factor is a parameter that increases the contingency requirements. It defaults to 1.0. - + GEN_SPINNING_RESERVE_TYPES is a set of all allowed reserve types for each generation project. This is read from generation_projects_reserve_capability.tab. If that file doesn't exist, this defaults to GENERATION_PROJECTS x {"spinning"}. - gen_reserve_type_max_share specifies the maximum amount of committed + gen_reserve_type_max_share specifies the maximum amount of committed capacity that can be used to provide each type of reserves. It is indexed by GEN_SPINNING_RESERVE_TYPES. This is read from generation_projects_reserve_capability.tab and defaults to 1 if not specified. SPINNING_RESERVE_CAPABLE_GEN_TPS is a subset of GEN_TPS of generators that can provide spinning reserves based on generation_projects_reserve_capability.tab. - + CommitGenSpinningReservesUp[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] is a decision variable indicating how much upward spinning reserve capacity to commit (in MW). - + CommitGenSpinningReservesDown[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] is a corresponding variable for downward spinning reserves. CommitGenSpinningReservesUp_Limit[(g,t) in SPINNING_RESERVE_CAPABLE_GEN_TPS] and CommitGenSpinningReservesDown_Limit constrain the CommitGenSpinningReserves variables based on DispatchSlackUp and DispatchSlackDown. - + CommittedSpinningReserveUp[(b,t) in BALANCING_AREA_TIMEPOINTS] and CommittedSpinningReserveDown are expressions summarizing the CommitGenSpinningReserves variables for generators within each balancing area.
- + Depending on the configuration parameters unit_contingency, project_contingency and spinning_requirement_rule, other components may be added by other functions which are documented above. @@ -380,11 +380,11 @@ def define_components(m): m.GEN_SPINNING_RESERVE_TYPES = Set(dimen=2) m.gen_reserve_type_max_share = Param( - m.GEN_SPINNING_RESERVE_TYPES, - within=PercentFraction, + m.GEN_SPINNING_RESERVE_TYPES, + within=PercentFraction, default=1.0 ) - + # reserve types that are supplied by generation projects # and generation projects that can provide reserves # note: these are also the indexing sets of the above set arrays; maybe that could be used? @@ -401,14 +401,14 @@ def rule(m): m.build_spinning_reserve_indexed_sets = BuildAction(rule=rule) m.SPINNING_RESERVE_TYPES_FOR_GEN = Set( - m.SPINNING_RESERVE_CAPABLE_GENS, + m.SPINNING_RESERVE_CAPABLE_GENS, rule=lambda m, g: m.SPINNING_RESERVE_TYPES_FOR_GEN_dict.pop(g) ) m.GENS_FOR_SPINNING_RESERVE_TYPE = Set( - m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.SPINNING_RESERVE_TYPES_FROM_GENS, rule=lambda m, rt: m.GENS_FOR_SPINNING_RESERVE_TYPE_dict.pop(rt) ) - + # types, generators and timepoints when reserves could be supplied m.SPINNING_RESERVE_TYPE_GEN_TPS = Set(dimen=3, initialize=lambda m: ( (rt, g, tp) @@ -421,8 +421,8 @@ def rule(m): for g in m.SPINNING_RESERVE_CAPABLE_GENS for tp in m.TPS_FOR_GEN[g] )) - - # decide how much of each type of reserves to produce from each generator + + # decide how much of each type of reserves to produce from each generator # during each timepoint m.CommitGenSpinningReservesUp = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) m.CommitGenSpinningReservesDown = Var(m.SPINNING_RESERVE_TYPE_GEN_TPS, within=NonNegativeReals) @@ -430,27 +430,27 @@ def rule(m): # constrain reserve provision appropriately m.CommitGenSpinningReservesUp_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: sum(m.CommitGenSpinningReservesUp[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= - m.DispatchSlackUp[g, tp] + <= + m.DispatchSlackUp[g, tp] # storage can give more up response by stopping charging + (m.ChargeStorage[g, tp] if g in getattr(m, 'STORAGE_GENS', []) else 0.0) ) m.CommitGenSpinningReservesDown_Limit = Constraint( m.SPINNING_RESERVE_CAPABLE_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: sum(m.CommitGenSpinningReservesDown[rt, g, tp] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[g]) - <= + <= m.DispatchSlackDown[g, tp] + ( # storage could give more down response by raising ChargeStorage to the maximum rate - (m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) - if g in getattr(m, 'STORAGE_GENS', []) + (m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g] - m.ChargeStorage[g, tp]) + if g in getattr(m, 'STORAGE_GENS', []) else 0.0 ) ) - # Calculate total spinning reserves from generation projects, + # Calculate total spinning reserves from generation projects, # and add to the list of reserve provisions def rule(m): d = m.TotalGenSpinningReserves_dict = defaultdict(float) @@ -462,21 +462,21 @@ def rule(m): m.TotalGenSpinningReserves_aggregate = BuildAction(rule=rule) m.TotalGenSpinningReservesUp = Expression( - m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.SPINNING_RESERVE_TYPES_FROM_GENS, m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: + rule=lambda m, rt, ba, tp: m.TotalGenSpinningReserves_dict.pop((rt, 'up', ba, tp), 0.0) ) m.TotalGenSpinningReservesDown = Expression( - 
m.SPINNING_RESERVE_TYPES_FROM_GENS, + m.SPINNING_RESERVE_TYPES_FROM_GENS, m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: + rule=lambda m, rt, ba, tp: m.TotalGenSpinningReserves_dict.pop((rt, 'down', ba, tp), 0.0) ) m.Spinning_Reserve_Up_Provisions.append('TotalGenSpinningReservesUp') m.Spinning_Reserve_Down_Provisions.append('TotalGenSpinningReservesDown') - + # define reserve requirements if m.options.fixed_contingency: gen_fixed_contingency(m) @@ -492,7 +492,7 @@ def rule(m): pass # users can turn off the rules and use their own instead else: raise ValueError('No --spinning-requirement-rule specified on command line; unable to allocate reserves.') - + def define_dynamic_components(m): """ @@ -504,12 +504,12 @@ def define_dynamic_components(m): BALANCING_AREA_TIMEPOINT_CONTINGENCIES is a set of (b, t, contingency) formed from the cross product of the set BALANCING_AREA_TIMEPOINTS and the dynamic list Spinning_Reserve_Contingencies. - + Enforce_MaximumContingency[(b,t,contingency) in BALANCING_AREA_TIMEPOINT_CONTINGENCIES] is a constraint that enforces the behavior of MaximumContingency by making - MaximumContingency >= contingency for each contingency registered in the + MaximumContingency >= contingency for each contingency registered in the dynamic list Spinning_Reserve_Contingencies. - + Satisfy_Spinning_Reserve_Up_Requirement[(b,t) in BALANCING_AREA_TIMEPOINTS] is a constraint that ensures upward spinning reserve requirements are being satisfied based on the sums of the two dynamic lists @@ -523,44 +523,44 @@ def define_dynamic_components(m): # define largest contingencies m.MaximumContingencyUp = Var( - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, doc=("Maximum of the registered Spinning_Reserve_Up_Contingencies, after " "multiplying by contingency_safety_factor.") ) m.MaximumContingencyDown = Var( - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, within=NonNegativeReals, doc=("Maximum of the registered Spinning_Reserve_Down_Contingencies, after " "multiplying by contingency_safety_factor.") ) m.Calculate_MaximumContingencyUp = Constraint( m.BALANCING_AREA_TIMEPOINTS, m.Spinning_Reserve_Up_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: + rule=lambda m, b, t, contingency: m.MaximumContingencyUp[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] ) m.Calculate_MaximumContingencyDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, m.Spinning_Reserve_Down_Contingencies, # list of contingency events - rule=lambda m, b, t, contingency: + rule=lambda m, b, t, contingency: m.MaximumContingencyDown[b, t] >= m.contingency_safety_factor * getattr(m, contingency)[b, t] ) - + # create reserve requirements equal to the largest contingencies # (these could eventually be region-specific) m.MaximumContingencyUpRequirement = Expression( - [m.options.contingency_reserve_type], + [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, rt, ba, tp: m.MaximumContingencyUp[ba, tp] ) m.MaximumContingencyDownRequirement = Expression( - [m.options.contingency_reserve_type], + [m.options.contingency_reserve_type], m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, rt, ba, tp: m.MaximumContingencyDown[ba, tp] ) - + m.Spinning_Reserve_Up_Requirements.append('MaximumContingencyUpRequirement') m.Spinning_Reserve_Down_Requirements.append('MaximumContingencyDownRequirement') - + # aggregate the requirements for each type of reserves during each timepoint def rule(m): def makedict(m, lst): @@ 
-588,16 +588,16 @@ def makedict(m, lst): # satisfy all spinning reserve requirements m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_UP_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Up_Provisions_dict.pop((rt, ba, tp), 0.0) - >= + >= m.Spinning_Reserve_Up_Requirements_dict.pop((rt, ba, tp)) ) m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint( m.SPINNING_RESERVE_REQUIREMENT_DOWN_BALANCING_AREA_TIMEPOINTS, - rule=lambda m, rt, ba, tp: + rule=lambda m, rt, ba, tp: m.Spinning_Reserve_Down_Provisions_dict.pop((rt, ba, tp), 0.0) - >= + >= m.Spinning_Reserve_Down_Requirements_dict.pop((rt, ba, tp)) ) @@ -605,11 +605,11 @@ def makedict(m, lst): def load_inputs(m, switch_data, inputs_dir): """ All files & columns are optional. - + generation_projects_reserve_capability.tab GENERATION_PROJECTS, RESERVE_TYPES, [gen_reserve_type_max_share] - - spinning_reserve_params.dat may override the default value of + + spinning_reserve_params.dat may override the default value of contingency_safety_factor. Note that this is a .dat file, not a .tab file. """ path=os.path.join(inputs_dir, 'generation_projects_reserve_capability.tab') From af00c83b334fb3d9d7b8c017529c58f1af27422e Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 27 Jun 2018 22:44:24 -1000 Subject: [PATCH 31/51] Bug fix in inflation adjustment query in hawaii.scenario_data --- switch_model/hawaii/scenario_data.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index cf7319760..3ba40224b 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -451,12 +451,16 @@ def write_tables(**args): SELECT -- costs specified in proj_existing_builds "GENERATION_PROJECT", b.build_year, - power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) - * SUM(b.proj_overnight_cost * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + SUM( + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * b.proj_overnight_cost * 1000.0 * proj_existing_cap + ) / SUM(proj_existing_cap) AS gen_overnight_cost, null AS gen_storage_energy_overnight_cost, - power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) - * SUM(b.proj_fixed_om * 1000.0 * proj_existing_cap) / sum(proj_existing_cap) + SUM( + power(1.0+%(inflation_rate)s, %(base_financial_year)s-b.base_year) + * b.proj_fixed_om * 1000.0 * proj_existing_cap + ) / SUM(proj_existing_cap) AS gen_fixed_om FROM study_projects p JOIN proj_existing_builds b USING (project_id) From 6588373100f48c82014fb925f2996941e9ea74f6 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 28 Jun 2018 13:14:59 -1000 Subject: [PATCH 32/51] Consider '--verbose' flag in solve_scenarios. --- switch_model/solve_scenarios.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index 011e557ee..44eb9b604 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -35,7 +35,7 @@ # Parse scenario-manager-related command-line arguments. # Other command-line arguments will be passed through to solve.py via scenario_cmd_line_args parser = _ArgumentParser( - allow_abbrev=False, description='Solve one or more SWITCH scenarios.' + allow_abbrev=False, description='Solve one or more Switch scenarios.' 
) parser.add_argument('--scenario', '--scenarios', nargs='+', dest='scenarios', default=[]) #parser.add_argument('--scenarios', nargs='+', default=[]) @@ -43,8 +43,10 @@ parser.add_argument("--scenario-queue", default="scenario_queue") parser.add_argument("--job-id", default=None) -#import pdb; pdb.set_trace() +# import pdb; pdb.set_trace() +# get a namespace object with successfully parsed scenario manager arguments scenario_manager_args = parser.parse_known_args(args=option_file_args + cmd_line_args)[0] +# get lists of other arguments to pass through to standard solve routine scenario_option_file_args = parser.parse_known_args(args=option_file_args)[1] scenario_cmd_line_args = parser.parse_known_args(args=cmd_line_args)[1] @@ -168,23 +170,26 @@ def scenarios_to_run(): # This list is found by retrieving the names of the lock-directories. already_run = filter(os.path.isdir, os.listdir(".")) for scenario_name, base_args in get_scenario_dict().items(): + scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args if scenario_name not in already_run and checkout(scenario_name): # run this scenario, then start again at the top of the list ran.append(scenario_name) - scenario_args = scenario_option_file_args + base_args + scenario_cmd_line_args yield (scenario_name, scenario_args) all_done = False break else: if scenario_name not in skipped and scenario_name not in ran: skipped.append(scenario_name) - print("Skipping {} because it was already run.".format(scenario_name)) + if is_verbose(scenario_args): + print("Skipping {} because it was already run.".format(scenario_name)) # move on to the next candidate # no more scenarios to run if skipped and not ran: print( - "Please remove the {sq} directory or its contents if you would like to " - "run these scenarios again. (rm -rf {sq})".format(sq=scenario_queue_dir) + "Skipping all scenarios because they have already been solved. " + "If you would like to run these scenarios again, " + "please remove the {sq} directory or its contents. (rm -rf {sq})" + .format(sq=scenario_queue_dir) ) return @@ -205,6 +210,11 @@ def get_scenario_name(scenario_args): # use ad-hoc parsing to extract the scenario name from a scenario-definition string return parse_arg("--scenario-name", default=None, args=scenario_args) +def is_verbose(scenario_args): + # check options settings for --verbose flag + # note: this duplicates settings in switch_model.solve, so it may fall out of date + return parse_arg("--verbose", default=False, args=scenario_args) + def get_scenario_dict(): # note: we read the list from the disk each time so that we get a fresher version # if the standard list is changed during a long solution effort. From da5b0813546b392cafe8ab7dae352b2d22e6a432 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 28 Jun 2018 17:10:38 -1000 Subject: [PATCH 33/51] Sort IIS before showing it. 
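Context for this one-line change: when Switch is run with the iis suffix enabled and the solver supports it, model.iis holds the constraints in the irreducibly inconsistent set in arbitrary order. Sorting the names makes infeasibility reports deterministic, so output from two runs can be diffed directly:

    print "\n".join(sorted(c.name for c in model.iis))

sorted() accepts the generator expression directly, so no intermediate list is needed.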
--- switch_model/solve.py | 59 +++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 107b16c63..d0a993aa8 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -15,13 +15,13 @@ def main(args=None, return_model=False, return_instance=False): start_time = time.time() if args is None: - # combine default arguments read from options.txt file with + # combine default arguments read from options.txt file with # additional arguments specified on the command line args = get_option_file_args(extra_args=sys.argv[1:]) # Get options needed before any modules are loaded pre_module_options = parse_pre_module_options(args) - + # turn on post-mortem debugging mode if requested # (from http://stackoverflow.com/a/1237407 ; more options available there) if pre_module_options.debug: @@ -62,7 +62,7 @@ def debug(type, value, tb): # build a module list based on configuration options, and add # the current module (to register define_arguments callback) modules = get_module_list(args) - + # Patch pyomo if needed, to allow reconstruction of expressions. # This must be done before the model is constructed. patch_pyomo() @@ -72,7 +72,7 @@ def debug(type, value, tb): # Add any suffixes specified on the command line (usually only iis) add_extra_suffixes(model) - + # return the model as-is if requested if return_model and not return_instance: return model @@ -83,7 +83,7 @@ def debug(type, value, tb): # get a list of modules to iterate through iterate_modules = get_iteration_list(model) - + if model.options.verbose: creation_time = time.time() print "\n=======================================================================" @@ -101,7 +101,7 @@ def debug(type, value, tb): instantiation_time = time.time() if model.options.verbose: print "Inputs loaded in {:.2f} s.\n".format(instantiation_time - creation_time) - + # return the instance as-is if requested if return_instance: if return_model: @@ -209,20 +209,20 @@ def new_construct(self, *args, **kwargs): def iterate(m, iterate_modules, depth=0): """Iterate through all modules listed in the iterate_list (usually iterate.txt), if any. If there is no iterate_list, then this will just solve the model once. - + If it exists, the iterate_list contains one row per level of iteration, and each row contains a list of modules to test for iteration at that level (these can be separated with commas, spaces or tabs). - The model will run through the levels like nested loops, running the lowest level + The model will run through the levels like nested loops, running the lowest level till it converges, then advancing the next higher level by one step, then running the lowest level to convergence/completion again, repeating until all levels are complete. During each iteration, the pre_iterate() and post_iterate() functions of each specified - module (if they exist) will be called before and after solving. When a module is - converged or completed, its post_iterate() function should return True. + module (if they exist) will be called before and after solving. When a module is + converged or completed, its post_iterate() function should return True. All modules specified in the iterate_list should also be loaded via the module_list or include_module(s) arguments. 
""" - + # create or truncate the iteration tree if depth == 0: m.iteration_node = tuple() @@ -235,11 +235,11 @@ def iterate(m, iterate_modules, depth=0): else: # iterate until converged at the current level - # note: the modules in iterate_modules were also specified in the model's + # note: the modules in iterate_modules were also specified in the model's # module list, and have already been loaded, so they are accessible via sys.modules # This prepends 'switch_model.' if needed, to be consistent with modules.txt. current_modules = [ - sys.modules[module_name if module_name in sys.modules else 'switch_model.' + module_name] + sys.modules[module_name if module_name in sys.modules else 'switch_model.' + module_name] for module_name in iterate_modules[depth]] j = 0 @@ -259,7 +259,7 @@ def iterate(m, iterate_modules, depth=0): # converge the deeper-level modules, if any (inner loop) iterate(m, iterate_modules, depth=depth+1) - + # post-iterate modules at this level m.iteration_number = j # may have been changed during iterate() m.iteration_node = m.iteration_node[:depth] + (j,) @@ -274,8 +274,8 @@ def iterate(m, iterate_modules, depth=0): return def iterate_module_func(m, module, func, converged): - """Call function func() in specified module (if available) and use the result to - adjust model convergence status. If func doesn't exist or returns None, convergence + """Call function func() in specified module (if available) and use the result to + adjust model convergence status. If func doesn't exist or returns None, convergence status will not be changed.""" module_converged = None iter_func = getattr(module, func, None) @@ -286,7 +286,7 @@ def iterate_module_func(m, module, func, converged): return converged else: return converged and module_converged - + def define_arguments(argparser): # callback function to define model configuration arguments while the model is built @@ -322,14 +322,14 @@ def define_arguments(argparser): # Define solver-related arguments # These are a subset of the arguments offered by "pyomo solve --solver=cplex --help" - argparser.add_argument("--solver", default="glpk", + argparser.add_argument("--solver", default="glpk", help='Name of Pyomo solver to use for the model (default is "glpk")') argparser.add_argument("--solver-manager", default="serial", help='Name of Pyomo solver manager to use for the model ("neos" to use remote NEOS server)') argparser.add_argument("--solver-io", default=None, help="Method for Pyomo to use to communicate with solver") # note: pyomo has a --solver-options option but it is not clear # whether that does the same thing as --solver-options-string so we don't reuse the same name. - argparser.add_argument("--solver-options-string", default=None, + argparser.add_argument("--solver-options-string", default=None, help='A quoted string of options to pass to the model solver. Each option must be of the form option=value. 
' '(e.g., --solver-options-string "mipgap=0.001 primalopt advance=2 threads=1")') argparser.add_argument("--keepfiles", action='store_true', default=None, @@ -338,7 +338,7 @@ def define_arguments(argparser): "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, help="Display information from the solver about its progress (usually combined with a suitable --solver-options string)") argparser.add_argument( - "--symbolic-solver-labels", action='store_true', default=None, + "--symbolic-solver-labels", action='store_true', default=None, help='Use symbol names derived from the model when interfacing with the solver. ' 'See "pyomo solve --solver=x --help" for more details.') argparser.add_argument("--tempdir", default=None, @@ -360,6 +360,9 @@ def define_arguments(argparser): argparser.add_argument( '--verbose', '-v', default=False, action='store_true', help='Show information about model preparation and solution') + # argparser.add_argument( + # '--quiet', '-q', dest='verbose', action='store_false', + # help="Don't show information about model preparation and solution (cancels --verbose setting)") argparser.add_argument( '--interact', default=False, action='store_true', help='Enter interactive shell after solving the instance to enable inspection of the solved model.') @@ -370,7 +373,7 @@ def define_arguments(argparser): def add_module_args(parser): parser.add_argument( - "--module-list", default=None, + "--module-list", default=None, help='Text file with a list of modules to include in the model (default is "modules.txt")' ) parser.add_argument( @@ -381,9 +384,9 @@ def add_module_args(parser): "--exclude-modules", "--exclude-module", dest="exclude_modules", nargs='+', default=[], help="Module(s) to remove from the model after processing --module-list and --include-modules" ) - # note: we define --inputs-dir here because it may be used to specify the location of + # note: we define --inputs-dir here because it may be used to specify the location of # the module list, which is needed before it is loaded. - parser.add_argument("--inputs-dir", default="inputs", + parser.add_argument("--inputs-dir", default="inputs", help='Directory containing input files (default is "inputs")') @@ -447,7 +450,7 @@ def get_module_list(args): # remove modules requested by the user for module_name in module_options.exclude_modules: modules.remove(module_name) - + # add the current module, since it has callbacks, e.g. define_arguments for iteration and suffixes modules.append("switch_model.solve") @@ -511,7 +514,7 @@ def solve(model): model.solver = SolverFactory(model.options.solver, solver_io=model.options.solver_io) # patch for Pyomo < 4.2 - # note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587. + # note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587. # (See https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10587 ) # This is misreported in the documentation as options=, but options= actually accepts a dictionary. if model.options.solver_options_string and not hasattr(model.solver, "_options_string_to_dict"): @@ -545,7 +548,7 @@ def solve(model): # while i is not None: # c, i = m._decl_order[i] # solver_args[suffixes].append(c.name) - + # patch for Pyomo < 4.2 if not hasattr(model.solver, "_options_string_to_dict"): solver_args.pop("options_string", "") @@ -554,7 +557,7 @@ def solve(model): if model.options.verbose: solve_start_time = time.time() print "\nSolving model..." 
- + if model.options.tempdir is not None: # from https://software.sandia.gov/downloads/pub/pyomo/PyomoOnlineDocs.html#_changing_the_temporary_directory from pyutilib.services import TempfileManager @@ -575,7 +578,7 @@ def solve(model): elif (results.solver.termination_condition == TerminationCondition.infeasible): if hasattr(model, "iis"): print "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" - print "\n".join(c.name for c in model.iis) + print "\n".join(sorted(c.name for c in model.iis)) else: print "Model was infeasible; if the solver can generate an irreducibly inconsistent set (IIS)," print "more information may be available by setting the appropriate flags in the " From bccfdff2584b7ff1c92b3ce8847be26fdf483842 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 28 Jun 2018 17:15:01 -1000 Subject: [PATCH 34/51] Begin work on fixed time-of-use pricing in iterative demand response system. --- .../demand_response/iterative/__init__.py | 257 ++++++++++-------- 1 file changed, 150 insertions(+), 107 deletions(-) diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index db7ee6948..1ffcf8fea 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -2,8 +2,8 @@ cancel out the basic system load and replace it with a convex combination of bids note: the demand_module (or some subsidiary module) may store calibration data -at the module level (not in the model), so this module should only be used with one -model at a time. An alternative approach would be to receive a calibration_data +at the module level (not in the model), so this module should only be used with one +model at a time. An alternative approach would be to receive a calibration_data object back from demand_module.calibrate(), then add that to the model and pass it back to the bid function when needed. @@ -16,7 +16,7 @@ # (this is a fixed adder to the cost in $/kWh, not a multiplier times the marginal cost) # that module can be used as-is to find the effect of any particular adder # or it can iterate at a level above the demand_response module -# and use something like scipy.optimize.newton() to find the right tax to come out +# and use something like scipy.optimize.newton() to find the right tax to come out # revenue-neutral (i.e., recover any stranded costs, rebate any supply-side rents) import os, sys, time @@ -66,7 +66,7 @@ def define_components(m): .format(mod=m.options.dr_demand_module) ) demand_module = sys.modules[m.options.dr_demand_module] - + # load scipy.optimize for use later try: global scipy @@ -77,13 +77,13 @@ def define_components(m): print "Please install this via 'conda install scipy' or 'pip install scipy'." print "="*80 raise - + # Make sure the model has dual and rc suffixes if not hasattr(m, "dual"): m.dual = Suffix(direction=Suffix.IMPORT) if not hasattr(m, "rc"): m.rc = Suffix(direction=Suffix.IMPORT) - + ################### # Unserved load, with a penalty. 
# to ensure the model is always feasible, no matter what demand bids we get @@ -108,7 +108,7 @@ def define_components(m): ################### # Price Responsive Demand bids ################## - + # list of all bids that have been received from the demand system m.DR_BID_LIST = Set(initialize = [], ordered=True) # we need an explicit indexing set for everything that depends on DR_BID_LIST @@ -116,9 +116,9 @@ def define_components(m): # (not needed, and actually doesn't work -- reconstruct() fails for sets) # m.DR_BIDS_LZ_TP = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMEPOINTS) # m.DR_BIDS_LZ_TS = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMESERIES) - + # data for the individual bids; each load_zone gets one bid for each timeseries, - # and each bid covers all the timepoints in that timeseries. So we just record + # and each bid covers all the timepoints in that timeseries. So we just record # the bid for each timepoint for each load_zone. m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, m.DR_PRODUCTS, mutable=True) @@ -130,7 +130,7 @@ def define_components(m): # weights to assign to the bids for each timeseries when constructing an optimal demand profile m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals) - + # def DR_Convex_Bid_Weight_rule(m, z, ts): # if len(m.DR_BID_LIST) == 0: # print "no items in m.DR_BID_LIST, skipping DR_Convex_Bid_Weight constraint" @@ -138,13 +138,13 @@ def define_components(m): # else: # print "constructing DR_Convex_Bid_Weight constraint" # return (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) - # + # # choose a convex combination of bids for each zone and timeseries m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: - Constraint.Skip if len(m.DR_BID_LIST) == 0 + Constraint.Skip if len(m.DR_BID_LIST) == 0 else (sum(m.DRBidWeight[b, z, ts] for b in m.DR_BID_LIST) == 1) ) - + # Since we don't have differentiated prices for each zone, we have to use the same # weights for all zones. (Otherwise the model will try to micromanage load in each # zone, but that won't be reflected in the prices we report.) 
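# (A sketch of that zone-coupling, hedged since this excerpt only shows the
# analogous coupling across timeseries; the component name here is
# hypothetical:
#     m.DR_Shared_Bid_Weight = Constraint(
#         m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES,
#         rule=lambda m, b, z, ts:
#             m.DRBidWeight[b, z, ts]
#             == m.DRBidWeight[b, m.LOAD_ZONES.first(), ts])
# This pins every zone to the first zone's weights, matching the single
# system-wide price signal described above.)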
@@ -165,10 +165,10 @@ def define_components(m): m.DRBidWeight[b, z, ts] == m.DRBidWeight[b, z, m.tp_ts[m.TPS_IN_PERIOD[m.ts_period[ts]].first()]] ) - - + + # Optimal level of demand, calculated from available bids (negative, indicating consumption) - m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy'] for b in m.DR_BID_LIST @@ -177,13 +177,13 @@ def define_components(m): # provide up and down reserves (from supply perspective, so "up" means less load) # note: the bids are negative quantities, indicating _production_ of reserves; # they contribute to the reserve requirement with opposite sign - m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandUpReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: -sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy up'] for b in m.DR_BID_LIST ) ) - m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, + m.DemandDownReserves = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, tp: -sum( m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid[b, z, tp, 'energy down'] for b in m.DR_BID_LIST @@ -216,13 +216,13 @@ def define_components(m): idx = m.Zone_Power_Withdrawals.index('zone_demand_mw') m.Zone_Power_Withdrawals[idx] = 'FlexibleDemand' - # private benefit of the electricity consumption + # private benefit of the electricity consumption # (i.e., willingness to pay for the current electricity supply) # reported as negative cost, i.e., positive benefit # also divide by number of timepoints in the timeseries # to convert from a cost per timeseries to a cost per timepoint. m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp: - (-1.0) + (-1.0) * sum(m.DRBidWeight[b, z, m.tp_ts[tp]] * m.dr_bid_benefit[b, z, m.tp_ts[tp]] for b in m.DR_BID_LIST for z in m.LOAD_ZONES) * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]] @@ -234,23 +234,66 @@ def define_components(m): # variable to store the baseline data m.base_data = None + # # TODO: create a data file that lists which timepoints are grouped into each flat + # # pricing block; also enforce a requirement that no block can span periods. + # # Then use that to choose flat prices for each block in each period when flat pricing + # # is turned on (or maybe only when TOU block pricing is turned on). + # # Price must be flat within each block, and total revenue across all blocks in each + # # period must equal total marginal cost for those loads. + # + # # Hours during each day that fall into each flat-pricing block (zero-based). + # # Note: this does not allow for blocks shorter than one hour, and if timepoints + # # are longer than one hour, they will be placed in the first matching hour. + # m.FLAT_PRICING_BLOCKS = Set() + # raise NotImplementedError("The line above just contained `Set(` until 6/27/18; something is missing here.") + # + # # Times during each day to switch from one flat-pricing block to another; should be a float + # # between 0 (midnight) and 24 (following midnight). Timepoints will be assigned to + # # the immediately preceding block. Default is 0 (single block all day). + # # This assumes that timepoints begin at midnight each day and are sequenced + # # from there. 
+ # m.FLAT_PRICING_BREAK_TIMES = Set(default=[0]) + # m.FLAT_PRICING_GROUPS = Set(initialize=m.PERIODS * m.FLAT_PRICING_START_TIMES) + # def rule(m, p, st): + # try: + # d = m.TPS_FOR_FLAT_PRICING_GROUP_dict + # except AttributeError: + # d = m.TPS_FOR_FLAT_PRICING_GROUP_dict = dict() + # # construct a dictionary of which timepoints fall in each block + # # tuples show starting time and + # sorted(range(len(seq)), key=seq.__getitem__) + # start_times = sorted(m.FLAT_PRICING_START_TIMES) + # cur_start = xxx + # raise NotImplementedError("The line above just contained `cur_start =` until 6/27/18; something is missing here.") + # + # start_time_tuples = [(s, 0) for s in m.FLAT_PRICING_START_TIMES] + # for ts in m.TIMESERIES: + # timepoint_tuples = [(i * m.ts_duration_of_tp[ts], tp) for i, tp in enumerate(m.TS_TPS[ts])] + # + # return d.pop(p, st) + # + # m.TPS_FOR_FLAT_PRICING_GROUP = Set(m.FLAT_PRICING_GROUPS, initialize=rule) + # + # m.tp_flat_pricing_block = Param(m.TIMEPOINTS, within=m.FLAT_PRICING_START_TIMES, initialize=rule) + + def pre_iterate(m): # could all prev values be stored in post_iterate? # then this func would just alter the model based on values calculated in post_iterate # (e.g., get a bid based on current prices, add bid to model, rebuild components) - + # NOTE: - # bids must be added to the model here, and the model must be reconstructed here, + # bids must be added to the model here, and the model must be reconstructed here, # so the model can then be solved and remain in a "solved" state through the end # of post-iterate, to avoid problems in final reporting. - + # store various properties from previous model solution for later reference if m.iteration_number == 0: # model hasn't been solved yet m.prev_marginal_cost = { (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS - } + } m.prev_demand = { (z, tp, prod): None for z in m.LOAD_ZONES for tp in m.TIMEPOINTS for prod in m.DR_PRODUCTS } @@ -274,7 +317,7 @@ def pre_iterate(m): # solution based on the prior round of bids, rather than comparing the new # bid to the prior solution to the master problem. This is probably fine. # TODO: does this correctly account for producer surplus? It seems like that's - # being treated as a cost (embedded in MC * demand); maybe this should use + # being treated as a cost (embedded in MC * demand); maybe this should use # total direct cost instead, # or focus specifically on consumer surplus (use prices instead of MC as the # convergence measure). But maybe this is OK, since the question is, "if we @@ -282,7 +325,7 @@ def pre_iterate(m): # we had then? no change for altered volume?), would everyone be much # better off than they are with the allocation we have now chosen?" # Maybe using MC lets us focus on whether there can be another incrementally - # different solution that would be much better than the one we have now. + # different solution that would be much better than the one we have now. # This ignores other solutions far away, where an integer variable is flipped, # but that's OK. (?) 
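    # In symbols (a sketch of the test computed below): with p the previous
    # marginal costs, q the previous demand, q_bid the latest bid and wtp its
    # reported benefit,
    #     prev_cost ~ p . q + DR_Welfare_Cost    (cost of the last solution)
    #     best_cost ~ p . q_bid - wtp            (lower bound from the planes)
    # and iteration stops once (prev_cost - best_cost) is a small fraction of
    # |p . q|, the previous direct cost.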
prev_direct_cost = value(sum( @@ -319,7 +362,7 @@ def pre_iterate(m): print 'previous direct cost: ${:,.0f}'.format(prev_direct_cost) print 'previous welfare cost: ${:,.0f}'.format(prev_welfare_cost) print "" - + # get the next bid and attach it to the model update_demand(m) @@ -367,7 +410,7 @@ def pre_iterate(m): print 'best direct cost: ${:,.0f}'.format(best_direct_cost) print 'best bid benefit: ${:,.0f}'.format(best_bid_benefit) print "" - + print "lower bound=${:,.0f}, previous cost=${:,.0f}, optimality gap (vs direct cost)={}" \ .format(best_cost, prev_cost, (prev_cost-best_cost)/abs(prev_direct_cost)) if prev_cost < best_cost: @@ -380,46 +423,46 @@ def pre_iterate(m): # basis for optimality test: # 1. The total cost of supply, as a function of quantity produced each hour, forms - # a surface which is convex downward, since it is linear (assuming all variables are - # continuous or all integer variables are kept at their current level, i.e., the curve - # is locally convex). (Think of the convex hull of the extreme points of the production + # a surface which is convex downward, since it is linear (assuming all variables are + # continuous or all integer variables are kept at their current level, i.e., the curve + # is locally convex). (Think of the convex hull of the extreme points of the production # cost function.) - # 2. The total benefit of consumption, as a function of quantity consumed each hour, + # 2. The total benefit of consumption, as a function of quantity consumed each hour, # forms a surface which is concave downward (by the assumption/requirement of convexity # of the demand function). # 3. marginal costs (prev_marginal_cost) and production levels (pref_demand) from the - # most recent solution to the master problem define a production cost plane which is - # tangent to the production cost function at that production level. From 1, the production + # most recent solution to the master problem define a production cost plane which is + # tangent to the production cost function at that production level. From 1, the production # cost function must lie on or above this surface everywhere. This plane is given by # (something + prev_marginal_cost * (demand - dr_bid)) - # 4. The last bid quantities (dr_bid) must be at a point where marginal benefit of consumption - # equals marginal cost of consumption (prev_marginal_cost) in all directions; otherwise - # they would not be a private optimum. - # 5. The benefit reported in the last bid (dr_bid_benefit) shows the level of the total + # 4. The last bid quantities (dr_bid) must be at a point where marginal benefit of consumption + # equals marginal cost of consumption (prev_marginal_cost) in all directions; otherwise + # they would not be a private optimum. + # 5. The benefit reported in the last bid (dr_bid_benefit) shows the level of the total # benefit curve at that point. # 6. From 2, 4 and 5, the prev_marginal_cost and the last reported benefit must form # a plane which is at or above the total benefit curve everywhere. This plane is given by # (-DR_Welfare_Cost - (prev_marginal_cost * (demand - prev_demand) + something)) # 7. Since the total cost curve must lie above the plane defined in 3. and the total - # benefit curve must lie below the plane defined in 6., the (constant) distance between + # benefit curve must lie below the plane defined in 6., the (constant) distance between # these planes is an upper bound on the net benefit that can be obtained. 
This is given by # (-DR_Welfare_Cost - prev_marginal_cost * (demand - prev_demand)) # - (prev_marginal_cost * (demand - dr_bid)) # = ... - + # (prev_marginal_cost * (demand - dr_bid)) # - (prev_marginal_cost * (demand - prev_demand) ) - # - - # = prev_marginal_cost * prev_demand + DR_Welfare_Cost + # - + # = prev_marginal_cost * prev_demand + DR_Welfare_Cost # - (prev_marginal_cost * dr_bid - dr_bid_benefit) - - # Check for convergence -- optimality gap is less than 0.1% of best possible cost + + # Check for convergence -- optimality gap is less than 0.1% of best possible cost # (which may be negative) # TODO: index this to the direct costs, rather than the direct costs minus benefits - # as it stands, it converges with about $50,000,000 optimality gap, which is about + # as it stands, it converges with about $50,000,000 optimality gap, which is about # 3% of direct costs. converged = (m.iteration_number > 0 and (prev_cost - best_cost)/abs(prev_direct_cost) <= 0.0001) - + return converged def post_iterate(m): @@ -429,7 +472,7 @@ def post_iterate(m): print "Total cost: ${v:,.0f}".format(v=value(m.SystemCost)) - # TODO: + # TODO: # maybe calculate prices for the next round here and attach them to the # model, so they can be reported as final prices (currently we don't # report the final prices, only the prices prior to the final model run) @@ -447,10 +490,10 @@ def post_iterate(m): # report information on most recent bid if m.iteration_number == 0: util.create_table( - output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), - headings= + output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), + headings= ( - "bid_num", "load_zone", "timeseries", "timepoint", + "bid_num", "load_zone", "timeseries", "timepoint", ) + tuple("marginal_cost " + prod for prod in m.DR_PRODUCTS) + tuple("price " + prod for prod in m.DR_PRODUCTS) + tuple("bid " + prod for prod in m.DR_PRODUCTS) @@ -461,14 +504,14 @@ def post_iterate(m): b = m.DR_BID_LIST.last() # current bid util.append_table( m, m.LOAD_ZONES, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "bid_{t}.tsv".format(t=tag)), values=lambda m, z, tp: ( b, z, m.tp_ts[tp], m.tp_timestamp[tp], - ) + ) + tuple(m.prev_marginal_cost[z, tp, prod] for prod in m.DR_PRODUCTS) + tuple(m.dr_price[b, z, tp, prod] for prod in m.DR_PRODUCTS) + tuple(m.dr_bid[b, z, tp, prod] for prod in m.DR_PRODUCTS) @@ -482,25 +525,25 @@ def post_iterate(m): # store the current bid weights for future reference if m.iteration_number == 0: util.create_table( - output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), headings=("iteration", "load_zone", "timeseries", "bid_num", "weight") ) - util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, - output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), + util.append_table(m, m.LOAD_ZONES, m.TIMESERIES, m.DR_BID_LIST, + output_file=os.path.join(outputs_dir, "bid_weights_{t}.tsv".format(t=tag)), values=lambda m, z, ts, b: (len(m.DR_BID_LIST), z, ts, b, m.DRBidWeight[b, z, ts]) ) - + # if m.iteration_number % 5 == 0: # # save time by only writing results every 5 iterations # write_results(m) - + write_dual_costs(m) write_results(m) write_batch_results(m) # if m.iteration_number >= 3: # import pdb; pdb.set_trace() - + def update_demand(m): """ @@ -523,7 +566,7 @@ def update_demand(m): # get new bids from the demand system at 
the current prices bids = get_bids(m) - + # add the new bids to the model if m.options.verbose: print "adding bids to model" @@ -535,13 +578,13 @@ def update_demand(m): # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in [m.TIMESERIES.first()]]) - + # print "m.dr_bid (first day):" # print [(b, z, ts, value(m.dr_bid[b, z, ts])) # for b in m.DR_BID_LIST # for z in m.LOAD_ZONES # for ts in m.TPS_IN_TS[m.TIMESERIES.first()]] - + def total_direct_costs_per_year(m, period): """Return undiscounted total cost per year, during each period, as calculated by SWITCH, @@ -549,7 +592,7 @@ def total_direct_costs_per_year(m, period): This code comes from financials.calc_sys_costs_per_period(), excluding discounting and upscaling to the period. - + NOTE: ideally this would give costs by zone and period, to allow calibration for different utilities within a large study. But the cost components don't distinguish that way. (They probably should, since that would allow us to discuss average electricity costs @@ -563,7 +606,7 @@ def total_direct_costs_per_year(m, period): for tp_cost in m.Cost_Components_Per_TP if tp_cost != "DR_Welfare_Cost" ) - ) + ) def electricity_marginal_cost(m, z, tp, prod): """Return marginal cost of providing product prod in load_zone z during timepoint tp.""" @@ -576,7 +619,7 @@ def electricity_marginal_cost(m, z, tp, prod): else: raise ValueError('Unrecognized electricity product: {}.'.format(prod)) return m.dual[component]/m.bring_timepoint_costs_to_base_year[tp] - + def electricity_demand(m, z, tp, prod): """Return total consumption of product prod in load_zone z during timepoint tp (negative if customers supply product).""" if prod == 'energy': @@ -596,16 +639,16 @@ def electricity_demand(m, z, tp, prod): raise ValueError('Unrecognized electricity product: {}.'.format(prod)) return demand - + def calibrate_model(m): """ - Calibrate the demand system and add it to the model. + Calibrate the demand system and add it to the model. """ - + # base_data consists of a list of tuples showing (load_zone, timeseries, base_load (list) and base_price) # note: the constructor below assumes list comprehensions will preserve the order of the underlying list # (which is guaranteed according to http://stackoverflow.com/questions/1286167/is-the-order-of-results-coming-from-a-list-comprehension-guaranteed) - + # calculate the average-cost price for the current study period # TODO: store monthly retail prices in system_load, and find annual average prices # that correspond to the load forecasts for each period, then store scale factors @@ -613,23 +656,23 @@ def calibrate_model(m): # years (same technique as rescaling the loads, but only adjusting the mean), then # report base prices for each timepoint along with the loads in loads.tab. # For now, we just assume the base price was $180/MWh, which is HECO's average price in - # 2007 according to EIA form 826. + # 2007 according to EIA form 826. 
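A note on the dual-based pricing in electricity_marginal_cost() above: with Pyomo, dual values are only importable if the model declares a dual suffix before solving. The sketch below shows the conversion from a discounted dual value to an undiscounted $/MWh price; the constraint name Zone_Energy_Balance matches the core Switch balancing module, but the wrapper itself is illustrative rather than the module's exact code:

    from pyomo.environ import Suffix

    def prepare_duals(m):
        # duals can only be read back if the suffix exists before the solve
        m.dual = Suffix(direction=Suffix.IMPORT)

    def energy_price(m, z, tp):
        """Undiscounted $/MWh implied by the energy-balance dual (sketch)."""
        # the dual comes back in discounted base-year dollars, so undo the
        # weighting factor that brought this timepoint's costs to the base year
        return m.dual[m.Zone_Energy_Balance[z, tp]] / m.bring_timepoint_costs_to_base_year[tp]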
# TODO: add in something for the fixed costs, to make marginal cost commensurate with the base_price #baseCosts = [m.dual[m.EnergyBalance[z, tp]] for z in m.LOAD_ZONES for tp in m.TIMEPOINTS] base_price = 180 # average retail price for 2007 ($/MWh) m.base_data = [( z, - ts, + ts, [m.zone_demand_mw[z, tp] for tp in m.TPS_IN_TS[ts]], [base_price] * len(m.TPS_IN_TS[ts]) ) for z in m.LOAD_ZONES for ts in m.TIMESERIES] - + # make a dict of base_data, indexed by load_zone and timepoint, for later reference m.base_data_dict = { (z, tp): (m.zone_demand_mw[z, tp], base_price) for z in m.LOAD_ZONES for tp in m.TIMEPOINTS } - + # calibrate the demand module demand_module.calibrate(m, m.base_data) @@ -645,9 +688,9 @@ def get_prices(m, flat_revenue_neutral=True): prod: ( [m.base_data_dict[z, tp][1] for tp in m.TPS_IN_TS[ts]] if prod == 'energy' else [0.0]*len(m.TPS_IN_TS[ts]) - ) - for prod in m.DR_PRODUCTS - } + ) + for prod in m.DR_PRODUCTS + } for z in m.LOAD_ZONES for ts in m.TIMESERIES } else: @@ -667,7 +710,7 @@ def get_prices(m, flat_revenue_neutral=True): prices = find_flat_prices(m, marginal_costs, flat_revenue_neutral) else: prices = marginal_costs - + return prices def get_bids(m): @@ -728,7 +771,7 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # calc revenue balance for LSE (q*price - q.MC) # if > 0: decrease price (q will go up across the board) # if < 0: increase price (q will go down across the board) but - + flat_prices = dict() for z in m.LOAD_ZONES: for p in m.PERIODS: @@ -736,10 +779,10 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): sum( marginal_costs[z, ts]['energy'][i] * electricity_demand(m, z, tp, 'energy') - * m.tp_weight_in_year[tp] + * m.tp_weight_in_year[tp] for ts in m.TS_IN_PERIOD[p] for i, tp in enumerate(m.TPS_IN_TS[ts]) ) - / + / sum(electricity_demand(m, z, tp, 'energy') * m.tp_weight_in_year[tp] for tp in m.TPS_IN_PERIOD[p]) ) @@ -756,18 +799,18 @@ def find_flat_prices(m, marginal_costs, revenue_neutral): # bought the final constructed quantity at the final # marginal cost flat_prices[z, p] = price_guess - + # construct a collection of flat prices with the right structure final_prices = { (z, ts): - { + { prod: [flat_prices[z, p] if prod=='energy' else 0.0] * len(m.TPS_IN_TS[ts]) for prod in m.DR_PRODUCTS } for z in m.LOAD_ZONES for p in m.PERIODS for ts in m.TS_IN_PERIOD[p] } return final_prices - + def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): """find demand and revenue that would occur in this load_zone and period with flat prices, and @@ -795,16 +838,16 @@ def revenue_imbalance(flat_price, m, load_zone, period, dynamic_prices): imbalance = dynamic_price_revenue - flat_price_revenue print "{}, {}: price ${} produces revenue imbalance of ${}/year".format(load_zone, period, flat_price, imbalance) - + return imbalance def add_bids(m, bids): - """ + """ accept a list of bids written as tuples like (z, ts, prod, prices, demand, wtp) where z is the load zone, ts is the timeseries, prod is the product, - demand is a list of demand levels for the timepoints during that series (possibly negative, to sell), + demand is a list of demand levels for the timepoints during that series (possibly negative, to sell), and wtp is the net private benefit from consuming/selling the amount of power in that bid. 
Then add that set of bids to the model """ @@ -813,7 +856,7 @@ def add_bids(m, bids): b = 1 else: b = max(m.DR_BID_LIST) + 1 - + m.DR_BID_LIST.add(b) # add the bids for each load zone and timepoint to the dr_bid list @@ -839,11 +882,11 @@ def add_bids(m, bids): m.DemandUpReserves.reconstruct() m.DemandDownReserves.reconstruct() m.DR_Welfare_Cost.reconstruct() - # it seems like we have to reconstruct the higher-level components that depend on these + # it seems like we have to reconstruct the higher-level components that depend on these # ones (even though these are Expressions), because otherwise they refer to objects that - # used to be returned by the Expression but aren't any more (e.g., versions of DRBidWeight + # used to be returned by the Expression but aren't any more (e.g., versions of DRBidWeight # that no longer exist in the model). - # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring + # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring # to FlexibleDemand itself) m.Energy_Balance.reconstruct() if hasattr(m, 'SpinningReservesUpAvailable'): @@ -860,13 +903,13 @@ def reconstruct_energy_balance(m): # copy the existing Energy_Balance object old_Energy_Balance = dict(m.Energy_Balance) m.Energy_Balance.reconstruct() - # TODO: now that this happens just before a solve, there may be no need to + # TODO: now that this happens just before a solve, there may be no need to # preserve duals across the reconstruct(). if m.iteration_number > 0: for k in old_Energy_Balance: # change dual entries to match new Energy_Balance objects m.dual[m.Energy_Balance[k]] = m.dual.pop(old_Energy_Balance[k]) - + def write_batch_results(m): # append results to the batch results file, creating it if needed @@ -874,12 +917,12 @@ def write_batch_results(m): # create a file to hold batch results if it doesn't already exist # note: we retain this file across scenarios so it can summarize all results, - # but this means it needs to be manually cleared before launching a new + # but this means it needs to be manually cleared before launching a new # batch of scenarios (e.g., when running get_scenario_data or clearing the # scenario_queue directory) if not os.path.isfile(output_file): util.create_table(output_file=output_file, headings=summary_headers(m)) - + util.append_table(m, output_file=output_file, values=lambda m: summary_values(m)) def summary_headers(m): @@ -890,34 +933,34 @@ def summary_headers(m): +tuple(prod + ' payment ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) +tuple(prod + ' sold ' + str(p) for prod in m.DR_PRODUCTS for p in m.PERIODS) ) - + def summary_values(m): demand_components = [ c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs', 'FlexibleDemand') if hasattr(m, c) ] values = [] - + # tag (configuration) values.extend([ m.options.scenario_name, m.iteration_number, m.SystemCost # total cost (all periods) ]) - + # direct costs (including "other") values.extend([total_direct_costs_per_year(m, p) for p in m.PERIODS]) - + # DR_Welfare_Cost values.extend([ sum(m.DR_Welfare_Cost[t] * m.tp_weight_in_year[t] for t in m.TPS_IN_PERIOD[p]) for p in m.PERIODS ]) - + # payments by customers ([expected demand] * [price offered for that demand]) # note: this uses the final MC to set the final price, rather than using the # final price offered to customers. This creates consistency between the final # quantities and prices.
Otherwise, we would use prices that differ from the - # final cost by some random amount, and the split between PS and CS would + # final cost by some random amount, and the split between PS and CS would # jump around randomly. # note: if switching to using the offered prices, then you may have to use None # as the customer payment during iteration 0, since m.dr_price[last_bid, z, tp, prod] @@ -956,10 +999,10 @@ def get(component, idx, default): def write_results(m): outputs_dir = m.options.outputs_dir tag = filename_tag(m) - + avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) last_bid = m.DR_BID_LIST.last() - + # get final prices that will be charged to customers (not necessarily # the same as the final prices they were offered, if iteration was # stopped before complete convergence) @@ -1008,10 +1051,10 @@ def write_results(m): # for tp in m.TIMEPOINTS # for prod in m.DR_PRODUCTS # } - + util.write_table( m, m.LOAD_ZONES, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), headings= ("load_zone", "period", "timepoint_label") +tuple(m.FUELS) @@ -1025,8 +1068,8 @@ def write_results(m): +tuple("final price "+prod for prod in m.DR_PRODUCTS) +tuple("final q "+prod for prod in m.DR_PRODUCTS) +("peak_day", "base_load", "base_price"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) + values=lambda m, z, t: + (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( sum(DispatchGenByFuel(m, p, t, f) for p in m.GENERATION_PROJECTS_BY_FUEL[f]) for f in m.FUELS @@ -1055,7 +1098,7 @@ def write_results(m): m.base_data_dict[z, t][1], ) ) - + # import pprint # b=[(g, pe, value(m.BuildGen[g, pe]), m.gen_tech[g], m.gen_overnight_cost[g, pe]) for (g, pe) in m.BuildGen if value(m.BuildGen[g, pe]) > 0] # bt=set(x[3] for x in b) # technologies @@ -1082,7 +1125,7 @@ def write_dual_costs(m): dual_data = [] start_time = time.time() print "Writing {} ... 
".format(outfile), - + def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): if const in duals: dual = duals[const] From b407a193c86596cf93a70368779baaa7537a45c9 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 28 Jun 2018 17:31:44 -1000 Subject: [PATCH 35/51] Respect --verbose flag in hawaii.psip_2016_12 --- switch_model/hawaii/psip_2016_12.py | 153 ++++++++++++++-------------- 1 file changed, 77 insertions(+), 76 deletions(-) diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py index c7c12494b..850e82ae2 100644 --- a/switch_model/hawaii/psip_2016_12.py +++ b/switch_model/hawaii/psip_2016_12.py @@ -8,13 +8,13 @@ def TODO(note): raise NotImplementedError(dedent(note)) def define_arguments(argparser): - argparser.add_argument('--psip-force', action='store_true', default=True, + argparser.add_argument('--psip-force', action='store_true', default=True, help="Force following of PSIP plans (retiring AES and building certain technologies).") - argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', + argparser.add_argument('--psip-relax', dest='psip_force', action='store_false', help="Relax PSIP plans, to find a more optimal strategy.") - argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, + argparser.add_argument('--psip-minimal-renewables', action='store_true', default=False, help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).") - argparser.add_argument('--force-build', nargs=3, default=None, + argparser.add_argument('--force-build', nargs=3, default=None, help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.") def is_renewable(tech): @@ -25,10 +25,10 @@ def is_battery(tech): def define_components(m): ################### # resource rules to match HECO's 2016-04-01 PSIP - ################## - + ################## + # decide whether to enforce the PSIP preferred plan - # if an environment variable is set, that takes precedence + # if an environment variable is set, that takes precedence # (e.g., on a cluster to override options.txt) psip_env_var = os.environ.get('USE_PSIP_PLAN') if psip_env_var is None: @@ -41,10 +41,11 @@ def define_components(m): else: raise ValueError('Unrecognized value for environment variable USE_PSIP_PLAN={} (should be 0 or 1)'.format(psip_env_var)) - if psip: - print "Using PSIP construction plan." - else: - print "Relaxing PSIP construction plan." + if m.options.verbose: + if psip: + print "Using PSIP construction plan." + else: + print "Relaxing PSIP construction plan." # don't allow addition of anything other than those specified here # force retirement of AES at end of 2022 @@ -53,45 +54,45 @@ def define_components(m): # CIP CT-1, W9, W10, Airport DSG, Schofield, IC_Barge, IC_MCBH, Kalaeloa # no use of LNG - - # force battery installations directly (since they're not currently a standard tech) - + + # force battery installations directly (since they're not currently a standard tech) + # NOTE: RESOLVE used different wind and solar profiles from SWITCH. # SWITCH profiles seem to be more accurate, so we optimize against them - # and show that this may give (small) savings vs. the RESOLVE plan. - + # and show that this may give (small) savings vs. the RESOLVE plan. 
+ # TODO: Should I use Switch to investigate how much of HECO's poor performance is due # to using bad resource profiles (small onshore wind that doesn't rise in the rankings), - # how much is due to capping PV at 300 MW in 2020, + # how much is due to capping PV at 300 MW in 2020, # how much is due to non-integrality in RESOLVE (fixed by later jimmying by HECO), and # how much is due to forcing in elements before and after the optimization? - # NOTE: I briefly moved future DistPV to the existing plants workbook, with the idea that + # NOTE: I briefly moved future DistPV to the existing plants workbook, with the idea that # we assume the same forecasted adoption occurs with or without the PSIP. That approach # also spread the DistPV adoption among the top half of tranches, rather than allowing # Switch to cherry-pick the best tranches. However, that approach was ineffective because - # Switch was still able to add (and did add) DistPV from the lower tranches. That could - # have been fixed up in import_data.py, or the DistPV could have been moved here, into + # Switch was still able to add (and did add) DistPV from the lower tranches. That could + # have been fixed up in import_data.py, or the DistPV could have been moved here, into # technology_targets_definite. However, on further reflection, forcing DistPV installations - # to always match the PSIP forecast seems artificial -- it might be better to do DistPV - # than utility-scale PV, and there's no reason to preclude that in the non-PSIP plans. + # to always match the PSIP forecast seems artificial -- it might be better to do DistPV + # than utility-scale PV, and there's no reason to preclude that in the non-PSIP plans. # (Although it's probably not worth dwelling long on differences if they arise, since they - # won't make a huge difference in cost.) So now the DistPV is treated as just another optional + # won't make a huge difference in cost.) So now the DistPV is treated as just another optional # part of the PSIP plan. Note that this allows Switch to cherry-pick among the best DistPV # tranches to meet the PSIP, but that is a little conservative (favorable to HECO), because # Switch can also do that for the non-PSIP scenarios. Also, these targets are roughly equal # to the top half of the DistPV tranches, so there's not much cherry-picking going on anyway. # This could be resolved by setting (optional) project-specific targets in this module, # or by making the DistPV tranches coarser (e.g., upper half, third quartile, fourth quartile), - # which seems like a good idea for representing the general precision of DistPV policies + # which seems like a good idea for representing the general precision of DistPV policies # anyway. - - # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches + + # TODO (maybe): set project-specific targets, so that DistPV targets can be spread among tranches # and specific projects in the PSIP can be represented accurately (really just NPM wind). This - # might also allow reconstruction of exactly the same existing or PSIP project when retired - # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the + # might also allow reconstruction of exactly the same existing or PSIP project when retired + # (as specified in the PSIP). Currently the code below lets Switch choose the best project with the # same technology when it replaces retired renewable projects. 
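Since --force-build arrives from argparse (nargs=3) as three strings, the conversion into a typed target tuple, as done just below, can be sketched with a hypothetical helper:

    def parse_force_build(args):
        """Convert ['2025', 'OnshoreWind', '100'] from --force-build into
        a (year, technology, MW) tuple matching the target lists below."""
        year, tech, mw = args
        return (int(year), tech, float(mw))

    # parse_force_build(['2025', 'OnshoreWind', '100']) -> (2025, 'OnshoreWind', 100.0)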
- + # targets for individual generation technologies # (year, technology, MW added) # TODO: allow either CentralFixedPV or CentralTrackingPV for utility-scale solar @@ -109,31 +110,31 @@ def define_components(m): b = tuple(b) print "Forcing build: {}".format(b) technology_targets_definite.append(b) - + # technologies proposed in PSIP but which may not be built if a better plan is found. # All from final plan in Table 4-1 of PSIP 2016-12-23 sometimes cross-referenced with PLEXOS inputs. # These differ somewhat from inputs to RESOLVE or the RESOLVE plans in Table 3-1 and 3-4, but # they represent HECO's final plan as reported in the PSIP. - technology_targets_psip = [ - # Na Pua Makani (NPM) wind (still awaiting approval as of Feb. 2018) note: this is at a - # specific location (21.668 -157.956), but since it isn't in the existing plants + technology_targets_psip = [ + # Na Pua Makani (NPM) wind (still awaiting approval as of Feb. 2018) note: this is at a + # specific location (21.668 -157.956), but since it isn't in the existing plants # workbook, we represent it as a generic technology target. - # note: Resolve modeled 134 MW of planned onshore wind, 30 MW of optional onshore + # note: Resolve modeled 134 MW of planned onshore wind, 30 MW of optional onshore # and 800 MW of optional offshore; See "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/capacity_limits.tab". - # planned seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69); - # Resolve built 273 MW offshore in 2025-45 (including 143 MW rebuilt in 2045), + # planned seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69); + # Resolve built 273 MW offshore in 2025-45 (including 143 MW rebuilt in 2045), # and 30 MW onshore in 2045 (tables 3-1 and 3-4). - # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But + # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 (presumably rebuilt # in 2045) and 30 MW onshore in 2045. (2018, 'OnshoreWind', 24), # Na Pua Makani (NPM) wind (2018, 'OnshoreWind', 10), # CBRE wind # note: 109.6 MW SunEdison replacements are in Existing Plants workbook. - - # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) - # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), - # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). - # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 + + # note: RESOLVE had 53.6 MW of planned PV, which is probably Waianae (27.6), Kalaeloa (5) + # and West Loch (20). Then it added these (table 3-1): 2020: 300 MW (capped, see "renewable_limits.tab"), + # 2022: 48 MW, 2025: 193 MW, 2040: 541 (incl. 300 MW rebuild), 2045: 1400 MW (incl. 241 MW rebuild). + # HECO converted this to 109.6 MW of replacement SunEdison waiver projects in 2018 # (we list those as "existing") and other additions shown below. 
(2018, 'CentralTrackingPV', 15), # CBRE PV (2020, 'CentralTrackingPV', 180), @@ -148,31 +149,31 @@ def define_components(m): (2045, 'IC_MCBH', 68.0), # proxy for 68 MW of generic ICE capacity # batteries (MW) - # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in + # from PSIP 2016-12-23 Table 4-1; also see energy ("capacity") and power files in # "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Battery" # (note: we mistakenly treated these as MWh quantities instead of MW before 2018-02-20) (2019, 'Battery_Conting', 90), - (2022, 'Battery_4', 426), + (2022, 'Battery_4', 426), (2025, 'Battery_4', 29), (2030, 'Battery_4', 165), (2035, 'Battery_4', 168), (2040, 'Battery_4', 420), (2045, 'Battery_4', 1525), - # RESOLVE modeled 4-hour batteries as being capable of providing reserves, + # RESOLVE modeled 4-hour batteries as being capable of providing reserves, # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab). # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23). # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not # from EVs or flexible demand. - # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled - # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? + # DR: Looking at RESOLVE inputs, it seems like they take roughly 4% of load, and allow it to be doubled + # or cut to zero each hour (need to double-check this beyond first day). Maybe this includes EVs? # (no separate sign of EVs). # TODO: check Resolve load levels against Switch. # TODO: maybe I should switch over to using the ABC curves and load profiles that HECO used with PLEXOS # (for all islands). # TODO: Did HECO assume 4-hour batteries, demand response or EVs could provide reserves when running PLEXOS? # - all of these seem unlikely, but we have to ask HECO to find out; PLEXOS files are unclear. - - # installations based on changes in installed capacity shown in + + # installations based on changes in installed capacity shown in # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/planned_installed_capacities.tab # Also see Figure J-10 of 2016-12-23 PSIP (Vol. 3), which matches these levels (excluding FIT(?)). # Note: code further below adds in reconstruction of early installations @@ -184,23 +185,23 @@ def define_components(m): (2040, "DistPV", 1163.4-1015.4), (2045, "DistPV", 1307.9-1163.4), ] - + # Rebuild renewable projects at retirement (20 years), as specified in the PSIP # note: this doesn't include DistPV, because those are part of a forecast, not a plan, so they already # get reconstructed in the existing generators workbook, whether or not the PSIP plan is used. - + # note: this behavior is consistent with the following: # discussion on p. 3-8 of PSIP 2016-12-23 vol. 1. # Resolve applied planned wind and solar as set levels through 2045, not set additions in each year. 
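The DistPV targets above are written as differences of cumulative installed capacity (set levels, not per-year additions); deriving per-period additions from a cumulative forecast can be sketched as follows, using the 2035-2045 values from the list above:

    # cumulative MW from planned_installed_capacities.tab (values as above)
    cumulative = [(2035, 1015.4), (2040, 1163.4), (2045, 1307.9)]
    additions = [
        (year, 'DistPV', cap - prev_cap)
        for (prev_year, prev_cap), (year, cap) in zip(cumulative, cumulative[1:])
    ]
    # -> [(2040, 'DistPV', 148.0), (2045, 'DistPV', 144.5)], up to float rounding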
- # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in + # Table 4-1 shows final plans that were sent to Plexos; Plexos input files in # data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/Theme 5 # show optional capacity built in 2020 or 2025 (in list below) continuing in service in 2045. # and Plexos input files in data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/DBEDT-IR-12/Input/Oahu/Oahu E3 Plan Input/CSV files/PSIP Max Capacity.csv # don't show any retirements of wind and solar included as "planned" in RESOLVE and "existing" in Switch # (Waivers PV1, West Loch; Kawailoa may be omitted?) - # also note: Plexos input files in XX + # also note: Plexos input files in XX # show max battery capacity equal to sum of all prior additions - + # projects from existing plants workbook (pasted in) existing_techs = [ (2011, "OnshoreWind", 30), @@ -213,7 +214,7 @@ def define_components(m): (2018, "CentralTrackingPV", 14.7), (2018, "CentralTrackingPV", 46), (2018, "CentralTrackingPV", 20), - ] + ] existing_techs += technology_targets_definite existing_techs += technology_targets_psip # rebuild all renewables at retirement (20 years for RE, 15 years for batteries) @@ -225,10 +226,10 @@ def define_components(m): # don't schedule rebuilding past end of study rebuild_targets = [t for t in rebuild_targets if t[0] <= 2045] technology_targets_psip += rebuild_targets - + # make sure LNG is turned off if psip and getattr(m.options, "force_lng_tier", []) != ["none"]: - raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.') + raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.') if psip: technology_targets = technology_targets_definite + technology_targets_psip @@ -240,7 +241,7 @@ def define_components(m): # make a list of renewable technologies m.RENEWABLE_TECHNOLOGIES = Set( - initialize=m.GENERATION_TECHNOLOGIES, + initialize=m.GENERATION_TECHNOLOGIES, filter=lambda m, tech: is_renewable(tech) ) @@ -258,7 +259,7 @@ def technology_target_init(m, per, tech): def MakeGenTechDicts_rule(m): # get unit sizes of all technologies - unit_sizes = m.gen_tech_unit_size_dict = defaultdict(float) + unit_sizes = m.gen_tech_unit_size_dict = defaultdict(float) for g, unit_size in m.gen_unit_size.iteritems(): tech = m.gen_tech[g] if tech in unit_sizes: @@ -267,27 +268,27 @@ def MakeGenTechDicts_rule(m): else: unit_sizes[tech] = unit_size # get predetermined capacity for all technologies - predet_cap = m.gen_tech_predetermined_cap_dict = defaultdict(float) + predet_cap = m.gen_tech_predetermined_cap_dict = defaultdict(float) for (g, per), cap in m.gen_predetermined_cap.iteritems(): tech = m.gen_tech[g] predet_cap[tech, per] += cap m.MakeGenTechDicts = BuildAction(rule=MakeGenTechDicts_rule) - # with PSIP: BuildGen is zero except for technology_targets + # with PSIP: BuildGen is zero except for technology_targets # (sum during each period or before first period) # without PSIP: BuildGen is >= definite targets def Enforce_Technology_Target_rule(m, per, tech): """Enforce targets for each technology; exact target for PSIP cases, minimum target for non-PSIP.""" - - # get target, including any capacity specified in the predetermined builds, + + # get target, including any capacity specified in the predetermined builds, # so the target will be additional to those target = m.technology_target[per, tech] + 
m.gen_tech_predetermined_cap_dict[tech, per] - + # convert target to closest integral number of units # (some of the targets are based on nominal unit sizes rather than actual max output) if m.gen_tech_unit_size_dict[tech] > 0.0: target = round(target / m.gen_tech_unit_size_dict[tech]) * m.gen_tech_unit_size_dict[tech] - + if tech == "LoadShiftBattery": # special treatment for batteries, which are not a standard technology if hasattr(m, 'BuildBattery'): @@ -298,11 +299,11 @@ def Enforce_Technology_Target_rule(m, per, tech): else: build = sum( m.BuildGen[g, per] - for g in m.GENERATION_PROJECTS + for g in m.GENERATION_PROJECTS if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS ) - if type(build) is int and build == 0: + if type(build) is int and build == 0: # no matching projects found if target == 0: return Constraint.Skip @@ -335,12 +336,12 @@ def Enforce_Technology_Target_rule(m, per, tech): Constraint.Skip if (aes_g, tp) not in m.GEN_TPS else (m.DispatchGen[aes_g, tp] <= m.OperateAES[m.tp_period[tp]] * aes_size) ) - m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: + m.AESDeactivateFixedCost = Expression(m.PERIODS, rule=lambda m, per: 0.0 if per not in m.AES_OPERABLE_PERIODS else - m.OperateAES[per] * aes_size * m.gen_fixed_om[aes_g, aes_bld_year] ) m.Cost_Components_Per_Period.append('AESDeactivateFixedCost') - + if psip: # keep AES active until 9/2022; deactivate after that # note: since a period starts in 2022, we retire before that @@ -351,10 +352,10 @@ def Enforce_Technology_Target_rule(m, per, tech): # before 2040: no biodiesel, and only 100-300 GWh of non-LNG fossil fuels # period including 2040-2045: <= 300 GWh of oil; unlimited biodiesel or LNG - + # no biodiesel before 2040 (then phased in fast enough to meet the RPS) m.EARLY_BIODIESEL_MARKETS = Set(dimen=2, initialize=lambda m: [ - (rfm, per) + (rfm, per) for per in m.PERIODS if per + m.period_length_years[per] <= 2040 for rfm in m.REGIONAL_FUEL_MARKETS if m.rfm_fuel == 'Biodiesel' ]) @@ -389,7 +390,7 @@ def Enforce_Technology_Target_rule(m, per, tech): # else # Constraint.Skip # ) - + # force LNG conversion in 2021 (modeled on similar constraint in lng_conversion.py) # This could have extra code to skip the constraint if there are no periods after 2021, # but it is unlikely ever to be run that way. @@ -401,10 +402,10 @@ def Enforce_Technology_Target_rule(m, per, tech): # min(per for per in m.PERIODS if per + m.period_length_years[per] > 2021) # ] == 1 # ) - + # # Kahe 5, Kahe 6, Kalaeloa and CC_383 only burn LNG after 2021 # # This is not used because it creates a weird situation where HECO runs less-efficient non-LNG - # # plants instead of more efficient LNG-capable plants on oil. + # # plants instead of more efficient LNG-capable plants on oil. 
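The rounding step in Enforce_Technology_Target_rule above snaps each MW target to a whole number of units; in isolation (a sketch, not the module's code):

    def round_to_units(target_mw, unit_mw):
        """Round a capacity target to the nearest integral number of units,
        since some PSIP targets are quoted off nominal unit sizes."""
        if unit_mw > 0.0:
            return round(target_mw / unit_mw) * unit_mw
        return target_mw

    # round_to_units(100.0, 68.0) -> 68.0  (one unit)
    # round_to_units(110.0, 68.0) -> 136.0 (two units)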
# # there may be a faster way to build this, but it's not clear what # m.PSIP_Force_LNG_Use = Constraint(m.GEN_TP_FUELS, rule=lambda m, g, tp, fuel: # (m.GenFuelUseRate[g, tp, fuel] == 0) @@ -414,13 +415,13 @@ def Enforce_Technology_Target_rule(m, per, tech): # else # Constraint.Skip # ) - + # don't allow construction of other technologies (e.g., pumped hydro, fuel cells) advanced_tech_vars = [ "BuildPumpedHydroMW", "BuildAnyPumpedHydro", "BuildElectrolyzerMW", "BuildLiquifierKgPerHour", "BuildLiquidHydrogenTankKg", "BuildFuelCellMW", - ] + ] def no_advanced_tech_rule_factory(v): return lambda m, *k: (getattr(m, v)[k] == 0) for v in advanced_tech_vars: From c8c87233dbae52f5a03292ec3879b0daba77ad61 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 29 Jun 2018 10:30:59 -1000 Subject: [PATCH 36/51] Escape percent signs in help text per https://docs.python.org/2/library/argparse.html#help and https://docs.python.org/3/library/argparse.html#help (the argparse module raises an exception for unescaped % markers) --- .../operating_reserves/spinning_reserves_advanced.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py index 3bd106149..c2c13c0a6 100644 --- a/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py +++ b/switch_model/balancing/operating_reserves/spinning_reserves_advanced.py @@ -44,8 +44,8 @@ def define_arguments(argparser): choices = ["Hawaii", "3+5", "none"], help=("Choose rules for spinning reserves requirements as a function " "of variable renewable power and load. Hawaii uses rules " - "bootstrapped from the GE RPS study, and '3+5' requires 3% of " - "load and 5% of variable renewable output, based on the heuristic " + "bootstrapped from the GE RPS study, and '3+5' requires 3%% of " + "load and 5%% of variable renewable output, based on the heuristic " "described in the 2010 Western Wind and Solar Integration Study. " "Specify 'none' if applying your own rules instead. " ) From 8847ad5dc8c23a088a56ac0d1977a0e060431e81 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 29 Jun 2018 10:31:58 -1000 Subject: [PATCH 37/51] Move make_iterable() from reporting to utilities module, since it has more general usefulness. --- switch_model/reporting/__init__.py | 32 +++++++++--------------------- switch_model/utilities.py | 12 +++++++++++ 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 2ffdad93a..a48e7adf0 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -25,6 +25,7 @@ import csv import itertools from pyomo.environ import value, Var +from switch_model.utilities import make_iterable csv.register_dialect( "ampl-tab", @@ -101,20 +102,14 @@ def unpack_elements(items): return l -def make_iterable(item): - """Return an iterable for the one or more items passed.""" - if isinstance(item, basestring): - i = iter([item]) - else: - try: - # check if it's iterable - i = iter(item) - except TypeError: - i = iter([item]) - return i - +def post_solve(instance, outdir): + """ + Minimum output generation for all model runs. 
+ """ + save_generic_results(instance, outdir, instance.options.sorted_output) + save_total_cost_value(instance, outdir) -def _save_generic_results(instance, outdir, sorted_output): +def save_generic_results(instance, outdir, sorted_output): for var in instance.component_objects(): if not isinstance(var, Var): continue @@ -140,15 +135,6 @@ def _save_generic_results(instance, outdir, sorted_output): writer.writerow([value(obj)]) -def _save_total_cost_value(instance, outdir): +def save_total_cost_value(instance, outdir): with open(os.path.join(outdir, 'total_cost.txt'), 'w') as fh: fh.write('{}\n'.format(value(instance.SystemCost))) - - -def post_solve(instance, outdir): - """ - Minimum output generation for all model runs. - - """ - _save_generic_results(instance, outdir, instance.options.sorted_output) - _save_total_cost_value(instance, outdir) diff --git a/switch_model/utilities.py b/switch_model/utilities.py index 68f971772..4510846e7 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -104,6 +104,18 @@ def get_modules(model): yield sys.modules[m] +def make_iterable(item): + """Return an iterable for the one or more items passed.""" + if isinstance(item, basestring): + i = iter([item]) + else: + try: + # check if it's iterable + i = iter(item) + except TypeError: + i = iter([item]) + return i + def load_inputs(model, inputs_dir=None, attachDataPortal=True): """ From 673ee38dcef14e0a15c47e1a55ca2875f50db020 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 29 Jun 2018 10:32:35 -1000 Subject: [PATCH 38/51] Fix regression in treatment of --verbose by solve_scenarios. --- switch_model/solve_scenarios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index 44eb9b604..25fb17585 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -213,7 +213,7 @@ def get_scenario_name(scenario_args): def is_verbose(scenario_args): # check options settings for --verbose flag # note: this duplicates settings in switch_model.solve, so it may fall out of date - return parse_arg("--verbose", default=False, args=scenario_args) + return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) def get_scenario_dict(): # note: we read the list from the disk each time so that we get a fresher version From 6bc4187057901e2e09f2665666ccf977e12821d4 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 29 Jun 2018 14:48:56 -1000 Subject: [PATCH 39/51] Streamline timekeeping, make --reload-prior-solution more robust. --- switch_model/solve.py | 122 +++++++++++++++++++++++--------------- switch_model/utilities.py | 37 ++++++++---- 2 files changed, 100 insertions(+), 59 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index d0a993aa8..958f3e76a 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -1,19 +1,21 @@ #!/usr/bin/env python # Copyright (c) 2015-2017 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. 
-import sys, os, time, shlex, re +import sys, os, shlex, re from pyomo.environ import * from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition import pyomo.version -from switch_model.utilities import create_model, _ArgumentParser, Logging +from switch_model.utilities import ( + create_model, _ArgumentParser, Logging, StepTimer, make_iterable +) from switch_model.upgrade import do_inputs_need_upgrade, upgrade_inputs def main(args=None, return_model=False, return_instance=False): - start_time = time.time() + timer = StepTimer() if args is None: # combine default arguments read from options.txt file with # additional arguments specified on the command line @@ -85,9 +87,8 @@ def debug(type, value, tb): iterate_modules = get_iteration_list(model) if model.options.verbose: - creation_time = time.time() print "\n=======================================================================" - print "SWITCH model created in {:.2f} s.\nArguments:".format(creation_time - start_time) + print "SWITCH model created in {:.2f} s.\nArguments:".format(timer.step_time()) print ", ".join(k+"="+repr(v) for k, v in model.options.__dict__.items() if v) print "Modules:\n"+", ".join(m for m in modules) if iterate_modules: @@ -95,12 +96,14 @@ def debug(type, value, tb): print "=======================================================================\n" print "Loading inputs..." - # create an instance + # create an instance (also reports time spent reading data and loading into model) instance = model.load_inputs() + + #### Below here, we refer to instance instead of model #### + instance.pre_solve() - instantiation_time = time.time() - if model.options.verbose: - print "Inputs loaded in {:.2f} s.\n".format(instantiation_time - creation_time) + if instance.options.verbose: + print "Total time spent constructing model: {:.2f} s.\n".format(timer.step_time()) # return the instance as-is if requested if return_instance: @@ -109,64 +112,49 @@ def debug(type, value, tb): else: return instance - if model.options.reload_prior_solution: + if instance.options.reload_prior_solution: # read variable values from previously solved model - import csv - var_objects = [c for c in instance.component_objects() - if isinstance(c,pyomo.core.base.Var)] - def _convert_if_numeric(s): - try: - return float(s) - except ValueError: - return s - for var in var_objects: - if '{}.tab'.format(var.name) not in os.listdir(model.options.outputs_dir): - raise RuntimeError("Tab output file for variable {} cannot be found in outputs directory. Exiting.".format(var.name)) - with open(os.path.join(model.options.outputs_dir, '{}.tab'.format(var.name)),'r') as f: - reader = csv.reader(f, delimiter='\t') - # skip headers - next(reader) - for row in reader: - index = (_convert_if_numeric(i) for i in row[:-1]) - var[index].value = float(row[-1]) - print 'Loaded variable {} values into instance.'.format(var.name) - output_loading_time = time.time() - print 'Finished loading previous results into model instance in {:.2f} s.'.format(output_loading_time - instantiation_time) + reload_prior_solution(instance) + if instance.options.verbose: + print( + 'Loaded previous results into model instance in {:.2f} s.' 
+ .format(timer.step_time()) + ) else: # make sure the outputs_dir exists (used by some modules during iterate) # use a race-safe approach in case this code is run in parallel try: - os.makedirs(model.options.outputs_dir) + os.makedirs(instance.options.outputs_dir) except OSError: # directory probably exists already, but double-check - if not os.path.isdir(model.options.outputs_dir): + if not os.path.isdir(instance.options.outputs_dir): raise - # solve the model + # solve the model (reports time for each step as it goes) if iterate_modules: - if model.options.verbose: + if instance.options.verbose: print "Iterating model..." iterate(instance, iterate_modules) else: results = solve(instance) - if model.options.verbose: + if instance.options.verbose: print "Optimization termination condition was {}.\n".format( results.solver.termination_condition) + if instance.options.verbose: + timer.step_time() # restart counter for next step + # report/save results - if model.options.verbose: - post_solve_start_time = time.time() + if instance.options.verbose: print "Executing post solve functions..." instance.post_solve() - if model.options.verbose: - post_solve_end_time = time.time() - print "Post solve processing completed in {:.2f} s.".format( - post_solve_end_time - post_solve_start_time) + if instance.options.verbose: + print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) # return stdout to original sys.stdout = stdout_copy - if model.options.interact or model.options.reload_prior_solution: + if instance.options.interact or instance.options.reload_prior_solution: m = instance # present the solved model as 'm' for convenience banner = ( "\n" @@ -180,7 +168,6 @@ def _convert_if_numeric(s): import code code.interact(banner=banner, local=dict(globals().items() + locals().items())) - patched_pyomo = False def patch_pyomo(): global patched_pyomo @@ -205,6 +192,48 @@ def new_construct(self, *args, **kwargs): pyomo.environ.Expression.construct = new_construct del m +def reload_prior_solution(instance): + """ + Assign values to all model variables from .tab files saved after + previous solution. + """ + import csv + var_objects = instance.component_objects(Var) + for var in var_objects: + var_file = os.path.join(instance.options.outputs_dir, '{}.tab'.format(var.name)) + if not os.path.isfile(var_file): + raise RuntimeError( + "Tab output file for variable {} cannot be found in outputs " + "directory. Exiting.".format(var.name) + ) + try: + # check types of the first tuple of keys for this variable + key_types = [type(i) for i in make_iterable(next(var.iterkeys()))] + except StopIteration: + key_types = [] # no keys + with open(var_file,'r') as f: + reader = csv.reader(f, delimiter='\t') + next(reader) # skip headers + for row in reader: + index = tuple(t(k) for t, k in zip(key_types, row[:-1])) + try: + v = var[index] + except KeyError: + raise KeyError( + "Unable to set value for {}[{}]; index is invalid." + .format(var.name, keys) + ) + if row[-1] == '': + # Variables that are not used in the model end up with no + # value after the solve and get saved as blanks; we skip those. 
+ continue + val = float(row[-1]) + if v.is_integer() or v.is_binary(): + val = int(val) + v.value = val + if instance.options.verbose: + print 'Loaded variable {} values into instance.'.format(var.name) + def iterate(m, iterate_modules, depth=0): """Iterate through all modules listed in the iterate_list (usually iterate.txt), @@ -555,7 +584,7 @@ def solve(model): # solve the model if model.options.verbose: - solve_start_time = time.time() + timer = StepTimer() print "\nSolving model..." if model.options.tempdir is not None: @@ -566,8 +595,7 @@ def solve(model): results = model.solver_manager.solve(model, opt=model.solver, **solver_args) if model.options.verbose: - solve_end_time = time.time() - print "Solved model. Total time spent in solver: {:2f} s.".format(solve_end_time - solve_start_time) + print "Solved model. Total time spent in solver: {:2f} s.".format(timer.step_time()) model.solutions.load_from(results) diff --git a/switch_model/utilities.py b/switch_model/utilities.py index 4510846e7..77a25d456 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -5,12 +5,7 @@ Utility functions for SWITCH-pyomo. """ -import os -import types -import importlib -import re -import sys -import argparse +import os, types, importlib, re, sys, argparse, time import __main__ as main from pyomo.environ import * import pyomo.opt @@ -116,33 +111,51 @@ def make_iterable(item): i = iter([item]) return i -def load_inputs(model, inputs_dir=None, attachDataPortal=True): +class StepTimer(object): """ + Keep track of elapsed time for steps of a process. + Use timer = StepTimer() to create a timer, then retrieve elapsed time and/or + reset the timer at each step by calling timer.step_time() + """ + def __init__(self): + self.start_time = time.time() + def step_time(self): + """ + Reset timer to current time and return time elapsed since last step. + """ + last_start = self.start_time + self.start_time = now = time.time() + return now - last_start +def load_inputs(model, inputs_dir=None, attach_data_portal=True): + """ Load input data for an AbstractModel using the modules in the given list and return a model instance. This is implemented as calling the load_inputs() function of each module, if the module has that function. - """ if inputs_dir is None: inputs_dir = getattr(model.options, "inputs_dir", "inputs") # Load data; add a fancier load function to the data portal + timer = StepTimer() data = DataPortal(model=model) data.load_aug = types.MethodType(load_aug, data) for module in model.get_modules(): if hasattr(module, 'load_inputs'): module.load_inputs(model, data, inputs_dir) + if model.options.verbose: + print "Data read in {:.2f} s.\n".format(timer.step_time()) - # At some point, pyomo deprecated 'create' in favor of - # 'create_instance'. Determine which option is available - # and use that. + # At some point, pyomo deprecated 'create' in favor of 'create_instance'. + # Determine which option is available and use that. if hasattr(model, 'create_instance'): instance = model.create_instance(data) else: instance = model.create(data) + if model.options.verbose: + print "Instance created from data in {:.2f} s.\n".format(timer.step_time()) - if attachDataPortal: + if attach_data_portal: instance.DataPortal = data return instance From ace9876388df556d31f6e51f2273d1b8dea10254 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 1 Jul 2018 13:42:57 -1000 Subject: [PATCH 40/51] Refactor components to improve instance creation time and memory. 
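A quick usage sketch of the StepTimer helper added to utilities above (do_step_one and do_step_two are placeholders for real work, not functions in the codebase):

    from switch_model.utilities import StepTimer

    timer = StepTimer()
    do_step_one()    # placeholder
    print "step one took {:.2f} s".format(timer.step_time())  # step_time() also resets
    do_step_two()    # placeholder
    print "step two took {:.2f} s".format(timer.step_time())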
--- .../energy_sources/fuel_costs/markets.py | 45 +++++++++---- .../generators/core/commit/operate.py | 67 ++++++++++--------- 2 files changed, 65 insertions(+), 47 deletions(-) diff --git a/switch_model/energy_sources/fuel_costs/markets.py b/switch_model/energy_sources/fuel_costs/markets.py index 3b1c8ae90..4dc92a022 100644 --- a/switch_model/energy_sources/fuel_costs/markets.py +++ b/switch_model/energy_sources/fuel_costs/markets.py @@ -75,7 +75,7 @@ def define_components(mod): ConsumeFuelTier[rfm, period, tier] is a decision variable that denotes the amount of fuel consumed in each tier of a supply curve - in a particular regional fuel market and period (MMBtu/year). It + in a particular regional fuel market and period (MMBtu/year). It has an upper bound of rfm_supply_tier_limit. FuelConsumptionInMarket[rfm, period] is a derived decision variable @@ -180,9 +180,9 @@ def define_components(mod): with the fuel are a much larger driver of consumption than the fuel costs. - GEN_TPS_FOR_RFM_PERIOD[regional_fuel_market, period] is an indexed set - of GEN_TP_FUELS that contribute to a given regional - fuel market's activity in a given period. + GENS_FOR_RFM_PERIOD[regional_fuel_market, period] is an indexed set + of GENS that contribute to a given regional fuel market's activity + in a given period. Enforce_Fuel_Consumption is a constraint that ties the aggregate fuel consumption from dispatch into FuelConsumptionInMarket variable @@ -291,19 +291,36 @@ def rfm_annual_costs(m, rfm, p): # Components to link aggregate fuel consumption from project # dispatch into market framework - mod.GEN_TPS_FOR_RFM_PERIOD = Set( - mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, - within=mod.GEN_TP_FUELS, - initialize=lambda m, rfm, p: [ - (g, t, f) for (g, t, f) in m.GEN_TP_FUELS - if f == m.rfm_fuel[rfm] and - m.gen_load_zone[g] in m.ZONES_IN_RFM[rfm] and - m.tp_period[t] == p]) + def GENS_FOR_RFM_PERIOD_rule(m, rfm, p): + # Construct and cache a set of gens for each zone/fuel/period, then + # return lists of gens for each rfm/period as needed + try: + d = m.GENS_FOR_RFM_PERIOD_dict + except AttributeError: + d = m.GENS_FOR_RFM_PERIOD_dict = dict() + # d uses (zone, fuel, period) as key; could use (rfm, period) as key + # if m.zone_fuel_rfm (back-lookup) existed + for g in m.FUEL_BASED_GENS: + for f in m.FUELS_FOR_GEN[g]: + for p_ in m.PERIODS_FOR_GEN[g]: + d.setdefault((m.gen_load_zone[g], f, p_), []).append(g) + relevant_gens = [ + g + for z in m.ZONES_IN_RFM[rfm] + for g in d.pop((z, m.rfm_fuel[rfm], p), []) # pop releases memory + ] + return relevant_gens + mod.GENS_FOR_RFM_PERIOD = Set( + mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, + initialize=GENS_FOR_RFM_PERIOD_rule + ) def Enforce_Fuel_Consumption_rule(m, rfm, p): return m.FuelConsumptionInMarket[rfm, p] == sum( - m.GenFuelUseRate[g, t, f] * m.tp_weight_in_year[t] - for (g, t, f) in m.GEN_TPS_FOR_RFM_PERIOD[rfm, p]) + m.GenFuelUseRate[g, t, m.rfm_fuel[rfm]] * m.tp_weight_in_year[t] + for g in m.GENS_FOR_RFM_PERIOD[rfm, p] + for t in m.TPS_IN_PERIOD[p] + ) mod.Enforce_Fuel_Consumption = Constraint( mod.REGIONAL_FUEL_MARKETS, mod.PERIODS, rule=Enforce_Fuel_Consumption_rule) diff --git a/switch_model/generators/core/commit/operate.py b/switch_model/generators/core/commit/operate.py index fe0323022..1638b2995 100644 --- a/switch_model/generators/core/commit/operate.py +++ b/switch_model/generators/core/commit/operate.py @@ -83,9 +83,9 @@ def define_components(mod): The capacity started up or shutdown is completely determined by the change in CommitGen from one hour 
to the next, but we can't calculate these directly within the linear program because linear - programs don't have if statements. Instead, we'll define extra decision - variables that are tightly constrained. Since startup incurs costs and - shutdown does not, the linear program will not simultaneously set both + programs don't have if statements. Instead, we'll define extra decision + variables that are tightly constrained. Since startup incurs costs and + shutdown does not, the linear program will not simultaneously set both of these to non-zero values. StartupGenCapacity[(g, t) in GEN_TPS] is a decision variable @@ -136,7 +136,7 @@ def define_components(mod): downtime constraints are active. These are the indexing sets for the Enforce_Min_Uptime and Enforce_Min_Downtime constraints, and are probably not useful elsewhere. - + Enforce_Min_Uptime[(g, tp) in UPTIME_CONSTRAINED_GEN_TPS] and Enforce_Min_Downtime[(g, tp) in DOWNTIME_CONSTRAINED_GEN_TPS] are constraints that ensure that unit commitment respects the minimum @@ -158,7 +158,7 @@ def define_components(mod): rules. On the other hand any capacity that could have been committed at some point in the lookback window can be startup now, possibly replacing other units that were shutdown recently. - + -- Dispatch limits based on committed capacity -- gen_min_load_fraction[g] describes the minimum loading level of a @@ -174,7 +174,7 @@ def define_components(mod): gen_min_load_fraction, but has separate entries for each timepoint. This could be used, for example, for non-curtailable renewable energy projects. This defaults to the value of gen_min_load_fraction[g]. - + gen_min_cap_factor[(g, t) in GEN_TPS] describes the minimum loadding level for each project and timepoint as a fraction of committed capacity. This is an optional parameter that defaults @@ -262,23 +262,24 @@ def define_components(mod): within=NonNegativeReals) mod.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency = Constraint( mod.GEN_TPS, - rule=lambda m, g, t: - m.CommitGen[g, m.tp_previous[t]] - + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] + rule=lambda m, g, t: + m.CommitGen[g, m.tp_previous[t]] + + m.StartupGenCapacity[g, t] - m.ShutdownGenCapacity[g, t] == m.CommitGen[g, t]) - + # StartupGenCapacity costs mod.gen_startup_fuel = Param(mod.FUEL_BASED_GENS, default=0.0) mod.gen_startup_om = Param(mod.GENERATION_PROJECTS, default=0.0) - # StartupGenCapacity costs need to be divided over the duration of the - # timepoint because it is a one-time expenditure in units of $ - # but Cost_Components_Per_TP requires an hourly cost rate in $ / hr. + # Note: lump-sum startup O&M cost is divided by the duration of the + # timepoint to give a cost-per-hour during this timepoint, as needed by + # Cost_Components_Per_TP. 
mod.Total_StartupGenCapacity_OM_Costs = Expression( mod.TIMEPOINTS, rule=lambda m, t: sum( m.gen_startup_om[g] * m.StartupGenCapacity[g, t] / m.tp_duration_hrs[t] - for (g, t2) in m.GEN_TPS - if t == t2)) + for g in m.GENS_IN_PERIOD[m.tp_period[t]] + ) + ) mod.Cost_Components_Per_TP.append('Total_StartupGenCapacity_OM_Costs') mod.gen_min_uptime = Param( @@ -290,16 +291,16 @@ def define_components(mod): within=NonNegativeReals, default=0.0) mod.UPTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) + (g, tp) for g in m.GENERATION_PROJECTS if m.gen_min_uptime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] + for tp in m.TPS_FOR_GEN[g] ]) mod.DOWNTIME_CONSTRAINED_GEN_TPS = Set(dimen=2, initialize=lambda m: [ - (g, tp) + (g, tp) for g in m.GENERATION_PROJECTS if m.gen_min_downtime[g] > 0.0 - for tp in m.TPS_FOR_GEN[g] + for tp in m.TPS_FOR_GEN[g] ]) - + def tp_prev(m, tp, n=1): # find nth previous timepoint, wrapping from start to end of day return m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp, n) @@ -308,10 +309,10 @@ def min_time_rule(m, g, tp, up): """ This uses a simple rule: all capacity turned on in the last x hours must still be on now (or all capacity recently turned off must still be off).""" - + # how many timepoints must the project stay on/off once it's # started/shutdown? - # note: StartupGenCapacity and ShutdownGenCapacity are assumed to occur at the start of + # note: StartupGenCapacity and ShutdownGenCapacity are assumed to occur at the start of # the timepoint n_tp = int(round( (m.gen_min_uptime[g] if up else m.gen_min_downtime[g]) @@ -325,31 +326,31 @@ def min_time_rule(m, g, tp, up): # behavior of range()), because the current timepoint is # included in the duration when the capacity will be on/off. if up: - rule = ( - # online capacity >= recent startups + rule = ( + # online capacity >= recent startups # (all recent startups are still online) - m.CommitGen[g, tp] - >= + m.CommitGen[g, tp] + >= sum(m.StartupGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp)) ) else: # Find the largest fraction of capacity that could have # been committed in the last x hours, including the # current hour. We assume that everything above this band - # must remain turned off (e.g., on maintenance outage). + # must remain turned off (e.g., on maintenance outage). # Note: this band extends one step prior to the first # relevant shutdown, since that capacity could have been # online in the prior step. committable_fraction = m.gen_availability[g] * max( - m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] + m.gen_max_commit_fraction[g, tp_prev(m, tp, i)] for i in range(n_tp+1) ) - rule = ( + rule = ( # committable capacity - committed >= recent shutdowns # (all recent shutdowns are still offline) m.GenCapacityInTP[g, tp] * committable_fraction - - m.CommitGen[g, tp] - >= + - m.CommitGen[g, tp] + >= sum(m.ShutdownGenCapacity[g, tp_prev(m, tp, i)] for i in range(n_tp)) ) return rule @@ -359,7 +360,7 @@ def min_time_rule(m, g, tp, up): mod.Enforce_Min_Downtime = Constraint( mod.DOWNTIME_CONSTRAINED_GEN_TPS, rule=lambda *a: min_time_rule(*a, up=False) ) - + # Dispatch limits relative to committed capacity. 
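The lookback length in min_time_rule() above converts a minimum up/down time in hours into a whole number of timepoints; as a standalone sketch:

    def lookback_steps(min_hours, tp_duration_hrs):
        """Number of timepoints a unit must stay on (or off) after a
        startup (or shutdown), counting the current timepoint."""
        return int(round(float(min_hours) / tp_duration_hrs))

    # e.g. a 6 h minimum uptime on 2 h timepoints -> 3 steps, so all capacity
    # started up in this or the two previous timepoints must still be online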
mod.gen_min_load_fraction = Param( mod.GENERATION_PROJECTS, @@ -430,5 +431,5 @@ def load_inputs(mod, switch_data, inputs_dir): optional=True, filename=os.path.join(inputs_dir, 'gen_timepoint_commit_bounds.tab'), auto_select=True, - param=(mod.gen_min_commit_fraction, + param=(mod.gen_min_commit_fraction, mod.gen_max_commit_fraction, mod.gen_min_load_fraction_TP)) From e41ed440d9322c0b054c6fc4310a5d4e9aee5673 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 2 Jul 2018 13:18:25 -1000 Subject: [PATCH 41/51] Update federal tax credit calculations --- switch_model/hawaii/fed_subsidies.py | 82 +++++++++++++++------------- 1 file changed, 45 insertions(+), 37 deletions(-) diff --git a/switch_model/hawaii/fed_subsidies.py b/switch_model/hawaii/fed_subsidies.py index ccb8454ba..fa323bd39 100644 --- a/switch_model/hawaii/fed_subsidies.py +++ b/switch_model/hawaii/fed_subsidies.py @@ -5,44 +5,52 @@ def define_components(m): """ incorporate the effect of federal subsidies """ - + + # note: wind/solar/geothermal production tax credit expires in 2017-2019, + # so we ignore that (http://programs.dsireusa.org/system/program/detail/734) + # TODO: move these values into data files - wind_energy_source = 'WND' - # approx lifetime average credit, based on 2014$ 0.023/kWh for first 10 years of project - wind_prod_tax_credit = 0.015 * 1000 # $/MWh - solar_energy_source = 'SUN' - solar_invest_tax_credit = 0.3 # fraction of capital cost + itc_rates = { + # DistPV from http://programs.dsireusa.org/system/program/detail/1235 + (2018, 'DistPV'): 0.3, + (2019, 'DistPV'): 0.3, + (2020, 'DistPV'): 0.3, + (2021, 'DistPV'): 0.3, + # Wind, Solar and Geothermal ITC from + # http://programs.dsireusa.org/system/program/detail/658 + (2018, 'CentralTrackingPV'): 0.3, + (2019, 'CentralTrackingPV'): 0.3, + (2020, 'CentralTrackingPV'): 0.26, + (2021, 'CentralTrackingPV'): 0.22, + (2022, 'CentralTrackingPV'): 0.10, + (2018, 'OnshoreWind'): 0.22, + (2019, 'OnshoreWind'): 0.12, + (2018, 'OffshoreWind'): 0.22, + (2019, 'OffshoreWind'): 0.12, + } + itc_rates.update({ + (y, 'CentralTrackingPV'): 0.1 + for y in range(2023, 2051) + }) + itc_rates.update({ # clone the CentralTrackingPV entries + (y, 'CentralFixedPV'): itc_rates[y, 'CentralTrackingPV'] + for y in range(2018, 2051) + }) + itc_rates.update({ + (y, 'Geothermal'): 0.1 + for y in range(2018, 2051) + }) - # note: wind PTC expired at end of 2014; solar expires at end of 2016, - # except for continuing 10% business investment tax credit. 
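For reference, the way a (build year, technology) entry in itc_rates is meant to scale the annualized capital cost can be sketched as follows; the cost figures are hypothetical, and the authoritative calculation is the Federal_Investment_Tax_Credit_Annual expression below:

    # illustrative only: credit for 100 MW of CentralTrackingPV built in 2020
    itc_rates = {(2020, 'CentralTrackingPV'): 0.26}
    bld_yr, tech = 2020, 'CentralTrackingPV'
    build_mw = 100.0                  # MW built in bld_yr (assumed)
    capital_cost_annual = 150000.0    # $/MW-yr annualized capital cost (assumed)

    credit = -itc_rates[bld_yr, tech] * build_mw * capital_cost_annual
    # about -$3.9M/yr, entered in the objective as a negative annual cost
    assert abs(credit + 3.9e6) < 1e-3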
- - # note: here we assume that existing projects and new (unbuilt) projects - # are defined separately - m.NEW_GENS = Set(initialize=lambda m: set(p for (p, y) in m.NEW_GEN_BLD_YRS)) - - # model the wind production tax credit - m.Wind_Subsidy_Hourly = Expression( - m.TIMEPOINTS, - rule=lambda m, t: -wind_prod_tax_credit * sum( - m.DispatchGen[p, t] - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[wind_energy_source] - if p in m.NEW_GENS and (p, t) in m.GEN_TPS + # model the renewable investment tax credit as simply prorating the annual capital cost + m.Federal_Investment_Tax_Credit_Annual = Expression( + m.PERIODS, + rule=lambda m, pe: sum( + -itc_rates[bld_yr, m.gen_tech[g]] + * m.BuildGen[g, bld_yr] + * m.gen_capital_cost_annual[g, bld_yr] + for g in m.NON_FUEL_BASED_GENS + for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe] + if (bld_yr, m.gen_tech[g]) in itc_rates ) ) - m.Cost_Components_Per_TP.append('Wind_Subsidy_Hourly') - - # model the solar tax credit as simply prorating the annual capital cost - m.Solar_Credit_Annual = Expression(m.PERIODS, rule=lambda m, pe: - -solar_invest_tax_credit * sum( - m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[solar_energy_source] - if g in m.NEW_GENS - for bld_yr in m.BLD_YRS_FOR_GEN_PERIOD[g, pe])) - # # another version: - # m.Solar_Credit_Annual = Expression(m.PERIODS, rule=lambda m, pe: - # -solar_invest_tax_credit * sum( - # m.BuildGen[g, bld_yr] * m.gen_capital_cost_annual[g, bld_yr] - # for (g, bld_yr) in m.NEW_GEN_BLD_YRS - # if (pe in m.PERIODS_FOR_GEN_BLD_YR[g, bld_yr] - # and m.gen_energy_source[g] == solar_energy_source))) - m.Cost_Components_Per_Period.append('Solar_Credit_Annual') + m.Cost_Components_Per_Period.append('Federal_Investment_Tax_Credit_Annual') From 0a7243dd181f6a5c8e9964565f2b4f7f9c299a0e Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 2 Jul 2018 13:20:24 -1000 Subject: [PATCH 42/51] Move solution-loading code after feasibility test, to avoid uninitialized value errors for infeasible models --- switch_model/solve.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 2d2f07082..3782d8815 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -368,12 +368,12 @@ def define_arguments(argparser): # whether that does the same thing as --solver-options-string so we don't reuse the same name. argparser.add_argument("--solver-options-string", default=None, help='A quoted string of options to pass to the model solver. Each option must be of the form option=value. ' - '(e.g., --solver-options-string "mipgap=0.001 primalopt advance=2 threads=1")') + '(e.g., --solver-options-string "mipgap=0.001 primalopt=\'\' advance=2 threads=1")') argparser.add_argument("--keepfiles", action='store_true', default=None, help="Keep temporary files produced by the solver (may be useful with --symbolic-solver-labels)") argparser.add_argument( "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, - help="Display information from the solver about its progress (usually combined with a suitable --solver-options string)") + help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)") argparser.add_argument( "--symbolic-solver-labels", action='store_true', default=None, help='Use symbol names derived from the model when interfacing with the solver. 
' @@ -603,19 +603,9 @@ def solve(model): results = model.solver_manager.solve(model, opt=model.solver, **solver_args) - # Load the solution data into the results object (it only has execution - # metadata by default in recent versions of Pyomo). This will enable us to - # save and restore model solutions; the results object can be pickled to a - # file on disk, but the instance cannot. - # https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution - # - model.solutions.store_to(results) - model.last_results = results - if model.options.verbose: print "Solved model. Total time spent in solver: {:2f} s.".format(timer.step_time()) - # Only return if the model solved correctly, otherwise throw a useful error if(results.solver.status in {SolverStatus.ok, SolverStatus.warning} and results.solver.termination_condition == TerminationCondition.optimal): @@ -637,6 +627,15 @@ def solve(model): print "Hint: glpk has been known to classify infeasible problems as 'other'." raise RuntimeError("Solver failed to find an optimal solution.") + # Copy the solution data into the results object (it only has execution + # metadata by default in recent versions of Pyomo). This will enable us to + # save and restore model solutions; the results object can be pickled to a + # file on disk, but the instance cannot. + # https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution + # Note: this will fail if the solver doesn't return values for the variables + # (e.g., for infeasible models), so it should happen after the feasibility test. + model.solutions.store_to(results) + model.last_results = results # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 From b23b2f99d5d6da6b211f30a493edb1bffa4de65a Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 2 Jul 2018 13:22:27 -1000 Subject: [PATCH 43/51] Initial support for pumped hydro using standard storage components (untested, needs code to prevent switching directions when providing reserves) --- switch_model/hawaii/lake_wilson.py | 26 ++++++++++++++++++++++++++ switch_model/hawaii/scenario_data.py | 6 +++--- 2 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 switch_model/hawaii/lake_wilson.py diff --git a/switch_model/hawaii/lake_wilson.py b/switch_model/hawaii/lake_wilson.py new file mode 100644 index 000000000..9f5aab3fc --- /dev/null +++ b/switch_model/hawaii/lake_wilson.py @@ -0,0 +1,26 @@ +""" +Special modeling for Lake Wilson - relax daily energy balance by 10 MW to account +for net inflow. 
+""" +from pyomo.environ import * + +def define_components(m): + def rule(m): + g = 'Oahu_Lake_Wilson' + inflow = 10.0 + if g in m.GENERATION_PROJECTS: + for t in m.TPS_FOR_GEN[g]: + # assign new energy balance with extra inflow, and allow spilling + m.Track_State_Of_Charge[g, t] = ( + m.StateOfCharge[g, t] + <= + m.StateOfCharge[g, m.tp_previous[t]] + + (m.ChargeStorage[g, t] * m.gen_storage_efficiency[g] + - m.DispatchGen[g, t]) * m.tp_duration_hrs[t] + # allow inflow only if capacity is built + + inflow * m.tp_duration_hrs * m.GenCapacityInTP[g] / m.gen_unit_size[g] + ) + m.Add_Lake_Wilson_Inflow = BuildAction(rule=rule) + +# TODO: don't allow zero crossing when calculating reserves available +# see http://www.ucdenver.edu/faculty-staff/dmays/3414/Documents/Antal-MS-2014.pdf diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 3ba40224b..4a9166744 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -399,8 +399,8 @@ def write_tables(**args): cogen as gen_is_cogen, -- non_cycling as gen_non_cycling, variable_o_m * 1000.0 AS gen_variable_om, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN fuel ELSE 'multiple' END AS gen_energy_source, - CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery') THEN null ELSE {flhr} END AS gen_full_load_heat_rate, + CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN fuel ELSE 'multiple' END AS gen_energy_source, + CASE WHEN fuel IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') THEN null ELSE {flhr} END AS gen_full_load_heat_rate, min_uptime as gen_min_uptime, min_downtime as gen_min_downtime, startup_energy / unit_size as gen_startup_fuel, @@ -592,7 +592,7 @@ def write_tables(**args): cogen FROM study_generator_info ), all_fueled_techs AS ( - SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW', 'Battery') + SELECT * from all_techs WHERE orig_fuel NOT IN ('SUN', 'WND', 'MSW', 'Battery', 'Hydro') ), gen_multiple_fuels AS ( SELECT DISTINCT technology, b.energy_source as fuel FROM all_fueled_techs t From 996e70a2a22c1cc949917756aa652c3e11fe65e0 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 12 Jul 2018 11:41:30 -1000 Subject: [PATCH 44/51] Add ev_advanced module to hawaii package (pre-seeded, non-iterative Dantzig-Wolfe decomposition of EV fleet charging requirements) --- .gitignore | 1 + switch_model/hawaii/ev_advanced.py | 226 +++++++++++++++++++++++++++ switch_model/hawaii/psip_2016_12.py | 14 +- switch_model/hawaii/scenario_data.py | 98 +++++++++++- 4 files changed, 326 insertions(+), 13 deletions(-) create mode 100644 switch_model/hawaii/ev_advanced.py diff --git a/.gitignore b/.gitignore index 0c43fd99c..177181a78 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ gurobi.log switch_model.egg-info/ venv +build/ diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py new file mode 100644 index 000000000..aa2f29276 --- /dev/null +++ b/switch_model/hawaii/ev_advanced.py @@ -0,0 +1,226 @@ +import os +from pyomo.environ import * + +def define_arguments(argparser): + argparser.add_argument("--ev-timing", choices=['bau', 'optimal'], default='optimal', + help="Rule for when to charge EVs -- business-as-usual (upon arrival), flat around the clock, or optimal (default).") + argparser.add_argument('--ev-reserve-types', nargs='+', default=['spinning'], + help= + "Type(s) of reserves to provide from electric-vehicle charging (e.g., 'contingency' or 'regulation')." + "Default is generic 'spinning'. 
Specify 'none' to disable. Only takes effect with '--ev-timing optimal'." + ) + +# parameters describing the EV and ICE fleet each year, all indexed by zone, +# vehicle type and period +ev_zone_type_period_params = [ + "n_vehicles", + "ice_gals_per_year", "ice_fuel", "ev_kwh_per_year", + "ev_extra_cost_per_vehicle_year" +] + +def define_components(m): + + # indexing set for EV bids, decomposed to get sets of EV bid numbers and EV types + m.EV_ZONE_TYPE_BID_TP = Set(dimen=4) # load zone, vehicle type, bid number, timepoint + def rule(m): + bids = m.EV_BID_NUMS_set = set() + types = m.EV_TYPES_set = set() + for z, t, n, tp in m.EV_ZONE_TYPE_BID_TP: + bids.add(n) + types.add(t) + m.Split_EV_Sets = BuildAction(rule=rule) + m.EV_BID_NUMS = Set(initialize=lambda m: m.EV_BID_NUMS_set) + m.EV_TYPES = Set(initialize=lambda m: m.EV_TYPES_set) + + # parameters describing the EV and ICE fleet each year + + # fraction of vehicle fleet that will be electrified in each period (0-1) + # (could eventually be a decision variable) + m.ev_share = Param(m.LOAD_ZONES, m.PERIODS, within=PercentFraction) + for p in ev_zone_type_period_params: + setattr(m, p, Param(m.LOAD_ZONES, m.EV_TYPES, m.PERIODS)) + + # calculate the extra annual cost (non-fuel) of having EVs, relative to ICEs, + # for batteries and chargers + m.ev_extra_annual_cost = Param( + m.PERIODS, initialize=lambda m, p: + sum( + m.ev_share[z, p] + * m.n_vehicles[z, t, p] + * m.ev_extra_cost_per_vehicle_year[z, t, p] + for z in m.LOAD_ZONES + for t in m.EV_TYPES + ) + ) + # calculate total fuel cost for ICE (non-EV) VMTs + motor_fuel_mmbtu_per_gallon = { + # from https://www.eia.gov/Energyexplained/?page=about_energy_units + "Motor_Gasoline": 0.120476, + "Motor_Diesel": 0.137452 + } + if hasattr(m, "rfm_supply_tier_cost"): + ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[m.zone_rfm[z, f], p, 'base'] + else: + ice_fuel_cost_func = lambda m, z, p, f: m.fuel_cost[z, f, p] + m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: + sum( + (1.0 - m.ev_share[z, p]) + * m.n_vehicles[z, t, p] + * m.ice_gals_per_year[z, t, p] + * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, t, p]] + * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, t, p]) + for z in m.LOAD_ZONES + for t in m.EV_TYPES + ) + ) + + # add cost components to account for the vehicle miles traveled via EV or ICE + # (not used because it interferes with calculation of cost per kWh for electricity) + # m.Cost_Components_Per_Period.append('ev_extra_annual_cost') + # m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') + + # EV bid data -- total MW used by 100% EV fleet, for each zone, veh type, + # bid number, timepoint + m.ev_bid_by_type = Param(m.EV_ZONE_TYPE_BID_TP) + + # aggregate across vehicle types (types are only needed for reporting) + m.ev_bid_mw = Param( + m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMEPOINTS, + initialize=lambda m, z, n, tp: + sum(m.ev_bid_by_type[z, t, n, tp] for t in m.EV_TYPES) + ) + + # find lowest and highest possible charging in each timepoint, used for reserve calcs + m.ev_charge_min = Param( + m.LOAD_ZONES, m.TIMEPOINTS, + initialize=lambda m, z, tp: min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + ) + m.ev_charge_max = Param( + m.LOAD_ZONES, m.TIMEPOINTS, + initialize=lambda m, z, tp: max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + ) + + # decide which share of the fleet to allocate to each charging bid + m.EVBidWeight = Var(m.LOAD_ZONES, m.TIMESERIES, m.EV_BID_NUMS, within=PercentFraction) + m.Charge_Enough_EVs = Constraint( + m.LOAD_ZONES, 
m.TIMESERIES,
+        rule=lambda m, z, ts:
+            sum(m.EVBidWeight[z, ts, n] for n in m.EV_BID_NUMS) == m.ev_share[z, m.ts_period[ts]]
+    )
+
+    # calculate total EV charging
+    m.ChargeEVs = Expression(
+        m.LOAD_ZONES, m.TIMEPOINTS,
+        rule=lambda m, z, tp: sum(
+            m.EVBidWeight[z, m.tp_ts[tp], n] * m.ev_bid_mw[z, n, tp]
+            for n in m.EV_BID_NUMS
+        )
+    )
+
+    # set rules for when to charge EVs
+    # note: this could be generalized to fractions between 0% and 100% BAU
+    if m.options.ev_timing == "optimal":
+        if m.options.verbose:
+            print "Charging EVs at best time each day."
+        # no extra code needed
+    elif m.options.ev_timing == "bau":
+        if m.options.verbose:
+            print "Charging EVs at business-as-usual times of day."
+        # give full weight to BAU bid (number 0)
+        m.ChargeEVs_bau = Constraint(
+            m.LOAD_ZONES, m.EV_BID_NUMS, m.TIMESERIES,
+            rule=lambda m, z, n, ts: (
+                m.EVBidWeight[z, ts, n]
+                == (m.ev_share[z, m.ts_period[ts]] if n == 0 else 0)
+            )
+        )
+    else:
+        # should never happen
+        raise ValueError("Invalid value specified for --ev-timing: {}".format(str(m.options.ev_timing)))
+
+    # add the EV load to the model's energy balance
+    m.Zone_Power_Withdrawals.append('ChargeEVs')
+
+    # Register with spinning reserves if it is available and optimal EV charging is enabled.
+    if [rt.lower() for rt in m.options.ev_reserve_types] != ['none'] and m.options.ev_timing == "optimal":
+        if hasattr(m, 'Spinning_Reserve_Up_Provisions'):
+            # calculate available slack from EV charging
+            # (from supply perspective, so "up" means less load)
+            m.EVSlackUp = Expression(
+                m.BALANCING_AREA_TIMEPOINTS,
+                rule=lambda m, b, t: sum(
+                    m.ChargeEVs[z, t] - m.ev_charge_min[z, t]
+                    for z in m.ZONES_IN_BALANCING_AREA[b]
+                )
+            )
+            m.EVSlackDown = Expression(
+                m.BALANCING_AREA_TIMEPOINTS,
+                rule=lambda m, b, t: sum(
+                    m.ev_charge_max[z, t] - m.ChargeEVs[z, t]
+                    for z in m.ZONES_IN_BALANCING_AREA[b]
+                )
+            )
+            if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'):
+                # using advanced formulation, index by reserve type, balancing area, timepoint.
+                # define variables for each type of reserves to be provided
+                # choose how to allocate the slack between the different reserve products
+                m.EV_SPINNING_RESERVE_TYPES = Set(
+                    initialize=m.options.ev_reserve_types
+                )
+                m.EVSpinningReserveUp = Var(
+                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals
+                )
+                m.EVSpinningReserveDown = Var(
+                    m.EV_SPINNING_RESERVE_TYPES, m.BALANCING_AREA_TIMEPOINTS,
+                    within=NonNegativeReals
+                )
+                # constrain reserve provision within available slack
+                m.Limit_EVSpinningReserveUp = Constraint(
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    rule=lambda m, ba, tp:
+                        sum(
+                            m.EVSpinningReserveUp[rt, ba, tp]
+                            for rt in m.EV_SPINNING_RESERVE_TYPES
+                        ) <= m.EVSlackUp[ba, tp]
+                )
+                m.Limit_EVSpinningReserveDown = Constraint(
+                    m.BALANCING_AREA_TIMEPOINTS,
+                    rule=lambda m, ba, tp:
+                        sum(
+                            m.EVSpinningReserveDown[rt, ba, tp]
+                            for rt in m.EV_SPINNING_RESERVE_TYPES
+                        ) <= m.EVSlackDown[ba, tp]
+                )
+                m.Spinning_Reserve_Up_Provisions.append('EVSpinningReserveUp')
+                m.Spinning_Reserve_Down_Provisions.append('EVSpinningReserveDown')
+            else:
+                # using older formulation, only one type of spinning reserves, indexed by balancing area, timepoint
+                if m.options.ev_reserve_types != ['spinning']:
+                    raise ValueError(
+                        'Unable to use reserve types other than "spinning" with simple spinning reserves module.'
+                    )
+                m.Spinning_Reserve_Up_Provisions.append('EVSlackUp')
+                m.Spinning_Reserve_Down_Provisions.append('EVSlackDown')
+
+
+def load_inputs(m, switch_data, inputs_dir):
+    """
+    Import ev data from .tab files.
+    """
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_share.tab'),
+        auto_select=True,
+        param=m.ev_share
+    )
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_fleet_info_advanced.tab'),
+        auto_select=True,
+        param=[getattr(m, p) for p in ev_zone_type_period_params]
+    )
+    switch_data.load_aug(
+        filename=os.path.join(inputs_dir, 'ev_charging_bids.tab'),
+        auto_select=True,
+        param=m.ev_bid_by_type,
+        index=m.EV_ZONE_TYPE_BID_TP
+    )
diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py
index 850e82ae2..3a4f50e9a 100644
--- a/switch_model/hawaii/psip_2016_12.py
+++ b/switch_model/hawaii/psip_2016_12.py
@@ -46,7 +46,7 @@ def define_components(m):
            print "Using PSIP construction plan."
        else:
            print "Relaxing PSIP construction plan."
-
+
    # don't allow addition of anything other than those specified here
    # force retirement of AES at end of 2022
@@ -120,13 +120,14 @@ def define_components(m):
        # specific location (21.668 -157.956), but since it isn't in the existing plants
        # workbook, we represent it as a generic technology target.
        # note: Resolve modeled 134 MW of planned onshore wind, 30 MW of optional onshore
-        # and 800 MW of optional offshore; See "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/capacity_limits.tab".
-        # planned seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69);
+        # and 800 MW of optional offshore; See "data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input
+        # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/capacity_limits.tab".
+        # 'planned' seems to correspond to Na Pua Makani (24), CBRE (10), Kahuku (30), Kawailoka (69);
        # Resolve built 273 MW offshore in 2025-45 (including 143 MW rebuilt in 2045),
        # and 30 MW onshore in 2045 (tables 3-1 and 3-4).
        # Not clear why it picked offshore before onshore (maybe bad resource profiles?). But
-        # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025 (presumably rebuilt
-        # in 2045) and 30 MW onshore in 2045.
+        # in their final plan (table 4-1), HECO changed it to 200 MW offshore in 2025
+        # (presumably rebuilt in 2045) and 30 MW onshore in 2045.
        (2018, 'OnshoreWind', 24),  # Na Pua Makani (NPM) wind
        (2018, 'OnshoreWind', 10),  # CBRE wind
        # note: 109.6 MW SunEdison replacements are in Existing Plants workbook.
@@ -160,7 +161,8 @@ def define_components(m):
        (2040, 'Battery_4', 420),
        (2045, 'Battery_4', 1525),
        # RESOLVE modeled 4-hour batteries as being capable of providing reserves,
-        # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab).
+        # and didn't model contingency batteries (see data/HECO Plans/PSIP-WebDAV/2017-01-31 Response to Parties IRs/CA-IR-1/Input
+        # and Output Files by Case/E3 and Company Defined Cases/Market DGPV (Reference)/OA_NOLNG/technologies.tab).
        # Then HECO added a 90 MW contingency battery (table 4-1 of PSIP 2016-12-23).
        # Note: RESOLVE can get reserves from batteries (they only considered 4-hour batteries), but not
        # from EVs or flexible demand.
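The core of ev_advanced.py above is that fleet charging is a convex combination of pre-computed charging profiles ("bids"), with the weights summing to the EV adoption share -- the pre-seeded, non-iterative Dantzig-Wolfe decomposition named in the commit message. A standalone numeric sketch with made-up profiles:

    # Each bid is a feasible charging profile for a 100%-EV fleet; the model
    # picks convex weights per timeseries that sum to the EV adoption share.
    bau_bid = [5.0, 2.0, 1.0, 8.0]     # MW in each timepoint (bid 0 = BAU)
    night_bid = [9.0, 6.0, 1.0, 0.0]   # MW, an alternative profile (assumed)
    ev_share = 0.5                      # half the fleet is electrified
    weights = {0: 0.2, 1: 0.3}          # chosen by the solver; sum == ev_share
    assert abs(sum(weights.values()) - ev_share) < 1e-9

    charge = [weights[0]*b + weights[1]*n for b, n in zip(bau_bid, night_bid)]
    # -> roughly [3.7, 2.2, 0.5, 1.6] MW of total EV charging per timepoint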
diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 4a9166744..88ceeace4 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -238,7 +238,7 @@ def write_tables(**args): # gather info on fuels write_table('fuels.tab', """ - SELECT DISTINCT c.fuel_type AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible + SELECT DISTINCT replace(c.fuel_type, ' ', '_') AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible FROM fuel_costs c JOIN energy_source_properties p on (p.energy_source = c.fuel_type) WHERE load_zone in %(load_zones)s AND fuel_scen_id=%(fuel_scen_id)s; """, args) @@ -284,7 +284,7 @@ def write_tables(**args): write_table('fuel_cost.tab', with_period_length + """ - SELECT load_zone, fuel_type as fuel, p.period, + SELECT load_zone, replace(fuel_type, ' ', '_') as fuel, p.period, avg(price_mmbtu * {inflator} + COALESCE(fixed_cost, 0.00)) as fuel_cost FROM fuel_costs c, study_periods p JOIN period_length l USING (period) WHERE load_zone in %(load_zones)s @@ -299,15 +299,17 @@ def write_tables(**args): # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) write_table('regional_fuel_markets.tab', """ - SELECT DISTINCT concat('Hawaii_', fuel_type) AS regional_fuel_market, fuel_type AS fuel + SELECT DISTINCT + concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market, + replace(fuel_type, ' ', '_') AS fuel FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) write_table('fuel_supply_curves.tab', with_period_length + """ - SELECT concat('Hawaii_', fuel_type) as regional_fuel_market, - fuel_type as fuel, + SELECT concat('Hawaii_', replace(fuel_type, ' ', '_')) as regional_fuel_market, + replace(fuel_type, ' ', '_') as fuel, tier, p.period, avg(price_mmbtu * {inflator}) as unit_cost, @@ -324,7 +326,7 @@ def write_tables(**args): """.format(inflator=inflator), args) write_table('zone_to_regional_fuel_market.tab', """ - SELECT DISTINCT load_zone, concat('Hawaii_', fuel_type) AS regional_fuel_market + SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market FROM fuel_costs WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s; """, args) @@ -712,7 +714,7 @@ def write_tables(**args): ) ######################### - # EV annual energy consumption + # EV annual energy consumption (original, basic version) # print "ev_scenario:", args.get('ev_scenario', None) if args.get('ev_scenario', None) is not None: write_table('ev_fleet_info.tab', """ @@ -743,6 +745,88 @@ def write_tables(**args): ORDER BY 1, 2; """, args) + ######################### + # EV annual energy consumption (advanced, frozen Dantzig-Wolfe version) + if args.get('ev_scenario', None) is not None: + write_table('ev_share.tab', """ + SELECT + load_zone as "LOAD_ZONE", period as "PERIOD", + ev_share + FROM ev_adoption a JOIN study_periods p on a.year = p.period + WHERE load_zone in %(load_zones)s + AND time_sample = %(time_sample)s + AND ev_scenario = %(ev_scenario)s + ORDER BY 1, 2; + """, args) + write_table('ev_fleet_info_advanced.tab', """ + WITH detailed_fleet AS ( + SELECT + a.load_zone AS "LOAD_ZONE", + replace(f."vehicle type", ' ', '_') AS "VEHICLE_TYPE", + p.period AS "PERIOD", + f."number of vehicles" AS "n_vehicles", -- for whole fleet, not current adoption level + CASE + WHEN period <= 2020 THEN "gals fuel per year 2020" + WHEN period >= 2045 THEN 
"gals fuel per year 2045" + ELSE + (period-2020)/25.0 * "gals fuel per year 2045" + + (2045-period)/25.0 * "gals fuel per year 2020" + END AS "ice_gals_per_year", + CONCAT_WS('_', 'Motor', "ICE fuel") AS "ice_fuel", + "kWh per year" AS "ev_kwh_per_year", + CASE + WHEN period <= 2020 THEN "EV extra capital cost per year 2020" + WHEN period >= 2045 THEN "EV extra capital cost per year 2045" + ELSE + (period-2020)/25.0 * "EV extra capital cost per year 2045" + + (2045-period)/25.0 * "EV extra capital cost per year 2020" + END AS "ev_extra_cost_per_vehicle_year" + FROM ev_adoption a + JOIN study_periods p ON a.year = p.period + JOIN ev_fleet f ON f.load_zone = a.load_zone + WHERE a.load_zone in %(load_zones)s + AND time_sample = %(time_sample)s + AND ev_scenario = %(ev_scenario)s + ) + SELECT "LOAD_ZONE", + CONCAT_WS('_', 'All', replace(ice_fuel, 'Motor_', ''), 'Vehicles') AS "VEHICLE_TYPE", + "PERIOD", + SUM(n_vehicles) AS n_vehicles, + SUM(ice_gals_per_year*n_vehicles)/SUM(n_vehicles) AS ice_gals_per_year, + ice_fuel, + SUM(ev_kwh_per_year*n_vehicles)/SUM(n_vehicles) AS ev_kwh_per_year, + SUM(ev_extra_cost_per_vehicle_year*n_vehicles)/SUM(n_vehicles) + AS ev_extra_cost_per_vehicle_year + FROM detailed_fleet + GROUP BY 1, 2, 3, 6 + ORDER BY 1, 2, 3; + """, args) + # power consumption bids for each hour of the day + # (consolidate to one vehicle class to accelerate data retrieval and + # reduce model memory requirements) (note that there are 6 classes of + # vehicle and 25 bids for for 24-hour models, which makes 150 entries + # per load zone and timestep, which is larger than the renewable + # capacity factor data) + if args.get("skip_ev_bids", False): + print "SKIPPING ev_charging_bids.tab" + else: + write_table('ev_charging_bids.tab', """ + SELECT + b.load_zone AS "LOAD_ZONE", + CONCAT_WS('_', 'All', "ICE fuel", 'Vehicles') AS "VEHICLE_TYPE", + bid_number AS "BID_NUM", + study_hour AS "TIMEPOINT", + sum(charge_mw) AS ev_bid_by_type + FROM study_date d + JOIN study_hour h USING (study_date, time_sample) + JOIN ev_charging_bids b + ON b.hour = h.hour_of_day AND b.hours_per_step = d.ts_duration_of_tp + JOIN ev_fleet f ON b.vehicle_type=f."vehicle type" AND b.load_zone=f.load_zone + WHERE b.load_zone in %(load_zones)s + AND d.time_sample = %(time_sample)s + GROUP BY 1, 2, 3, 4 + ORDER BY 1, 2, 3, 4; + """, args) ######################### # pumped hydro From dfd519a84c578272b3946c3a3da96adeaa90c59c Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 12 Jul 2018 11:53:26 -1000 Subject: [PATCH 45/51] Cache last_results object at right point in solution sequence; only load full solution long enough to save it (if ever). --- switch_model/reporting/__init__.py | 12 ++++++++++++ switch_model/solve.py | 16 ++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 0cdf51838..949d14d8e 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -141,5 +141,17 @@ def save_total_cost_value(instance, outdir): def save_results(instance, outdir): + """ + Save model solution for later reuse. + + Note that this pickles a solver results object because the instance itself + cannot be pickled -- see + https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution + """ + # First, save the full solution data to the results object, because recent + # versions of Pyomo only store execution metadata there by default. 
+ instance.solutions.store_to(instance.last_results) with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: pickle.dump(instance.last_results, fh, protocol=-1) + # remove the solution from the results object, to minimize long-term memory use + instance.last_results.solution.clear() diff --git a/switch_model/solve.py b/switch_model/solve.py index 3782d8815..283c76484 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -609,7 +609,11 @@ def solve(model): # Only return if the model solved correctly, otherwise throw a useful error if(results.solver.status in {SolverStatus.ok, SolverStatus.warning} and results.solver.termination_condition == TerminationCondition.optimal): - return results + # Cache a copy of the results object, to allow saving and restoring model + # solutions later. + model.last_results = results + # Successful solution, return results + return results elif (results.solver.termination_condition == TerminationCondition.infeasible): if hasattr(model, "iis"): print "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" @@ -627,15 +631,7 @@ def solve(model): print "Hint: glpk has been known to classify infeasible problems as 'other'." raise RuntimeError("Solver failed to find an optimal solution.") - # Copy the solution data into the results object (it only has execution - # metadata by default in recent versions of Pyomo). This will enable us to - # save and restore model solutions; the results object can be pickled to a - # file on disk, but the instance cannot. - # https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution - # Note: this will fail if the solver doesn't return values for the variables - # (e.g., for infeasible models), so it should happen after the feasibility test. - model.solutions.store_to(results) - model.last_results = results + # no default return, because we'll never reach here # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 From f79f3bbec908cec74e0c5fd5e4ed76550124ee73 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 13 Jul 2018 20:21:21 -1000 Subject: [PATCH 46/51] Allow specification of RPS levels on command line, overriding rps_targets.tab --- switch_model/hawaii/lng_conversion.py | 62 ++--- switch_model/hawaii/rps.py | 329 +++++++++++++------------- 2 files changed, 201 insertions(+), 190 deletions(-) diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py index 2ad6e5a61..c5db0c581 100644 --- a/switch_model/hawaii/lng_conversion.py +++ b/switch_model/hawaii/lng_conversion.py @@ -5,14 +5,14 @@ # TODO: change fuel_markets_expansion to support more complex supply chains, # e.g., a regional facility (LNG switch) in line with a market upgrade (bulk LNG), # and possibly additional upgrades beyond (e.g., adding a second FSRU). 
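As a footnote to patch 45 above: the results object is the only picklable carrier for the solution, so the intended round trip looks roughly like the sketch below. The output path is assumed; saving is shown in reporting.save_results above, and solutions.load_from() is the corresponding Pyomo call for re-attaching values to a built instance:

    import os
    import pickle

    # restoring a previously saved solution (hypothetical path)
    path = os.path.join('outputs', 'results.pickle')
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            results = pickle.load(fh)
        # then, on a freshly constructed instance:
        # instance.solutions.load_from(results)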
-# For now, we include the cost of the LNG switch via ad-hoc constraints +# For now, we include the cost of the LNG switch via ad-hoc constraints from pyomo.environ import * from switch_model.financials import capital_recovery_factor def define_arguments(argparser): - argparser.add_argument('--force-lng-tier', nargs='*', default=None, - help="LNG tier to use or 'none' to use no LNG; can also specify start and end date to use this tier; optimal choices will be made if nothing specified.") + argparser.add_argument('--force-lng-tier', nargs='*', default=None, + help="LNG tier to use: tier [start [stop]] or 'none' to use no LNG. Optimal choices will be made if nothing specified.") def define_components(m): @@ -22,9 +22,9 @@ def define_components(m): # on which path to follow with LNG, if any) # Note: if we activate a tier in any market, we activate it in all markets # (e.g., bringing in containerized LNG for all islands) - + m.LNG_RFM_SUPPLY_TIERS = Set( - initialize=m.RFM_SUPPLY_TIERS, + initialize=m.RFM_SUPPLY_TIERS, filter=lambda m, rfm, per, tier: m.rfm_fuel[rfm].upper() == 'LNG' ) m.LNG_REGIONAL_FUEL_MARKETS = Set( @@ -33,41 +33,41 @@ def define_components(m): m.LNG_TIERS = Set( initialize=lambda m: {tier for rfm, per, tier in m.LNG_RFM_SUPPLY_TIERS} ) - - # force LNG to be deactivated when RPS is 100%; + + # force LNG to be deactivated when RPS is 100%; # this forces recovery of all costs before the 100% RPS takes effect # (otherwise the model sometimes tries to postpone recovery beyond the end of the study) if hasattr(m, 'RPS_Enforce'): - m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, + m.No_LNG_In_100_RPS = Constraint(m.LNG_RFM_SUPPLY_TIERS, rule=lambda m, rfm, per, tier: - (m.RFMSupplyTierActivate[rfm, per, tier] == 0) - if m.rps_target_for_period[per] >= 1.0 + (m.RFMSupplyTierActivate[rfm, per, tier] == 0) + if m.rps_target_for_period[per] >= 1.0 else Constraint.Skip ) - - # user can study different LNG durations by specifying a tier to activate and + + # user can study different LNG durations by specifying a tier to activate and # a start and end date. Both the capital recovery and fixed costs for this tier are # bundled into the market's fixed cost, which means a different fuel_supply_curves.tab # file is needed for each LNG duration (i.e., the tiers must be forced on or off # for a particular duration which matches the fuel_supply_curves.tab). This is # brittle and requires trying all permutations to find the optimum, which is not - # good. A better way would be to specify capital costs separately from fixed costs, - # and add a flag to force the model to recover capital costs completely within the + # good. A better way would be to specify capital costs separately from fixed costs, + # and add a flag to force the model to recover capital costs completely within the # study period if desired. (Another flag could set a minimum duration for LNG # infrastructure to be activated.) - + # This may mean defining a tier upgrade as a project with a certain capital cost # and fixed O&M. Or maybe for LNG upgrades, we require full recovery during the # online period? i.e., lump the cost on the first day of use? or amortize it over - # all fuel that passes through the project? maybe just allow specification of + # all fuel that passes through the project? maybe just allow specification of # capital cost and project life for LNG upgrades, and allow deactivation (avoiding # fixed O&M) after a certain period of time. Then PSIP module could force longer # activation if needed. 
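As an aside, the "tier [start [stop]]" convention in the new --force-lng-tier help can be read roughly as below. This sketch assumes an inclusive start year and an exclusive stop year; the authoritative logic is Force_LNG_Tier_rule, which follows:

    # illustrative parsing of: --force-lng-tier bulk 2025 2040
    args = ['bulk', '2025', '2040']     # as collected by argparse (nargs='*')
    tier = args[0]
    start = float(args[1]) if len(args) > 1 else None
    stop = float(args[2]) if len(args) > 2 else None

    def forced_on(period):
        # 'none' disables LNG entirely; otherwise force the tier on only
        # within the [start, stop) window (window convention assumed here)
        if tier == 'none':
            return False
        return (start is None or start <= period) and (stop is None or period < stop)

    assert forced_on(2030) and not forced_on(2045)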
- + # In the end, this was resolved by having the user specify multiple tiers with # different lifetimes and corresponding fixed costs per year; then the model # (or user) can choose a tier with a particular lifetime. - + # force use of a particular LNG tier in particular periods def Force_LNG_Tier_rule(m, rfm, per, tier): if m.options.force_lng_tier is None: @@ -104,8 +104,8 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # list of all projects and timepoints when LNG could potentially be used - m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: - ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS + m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: + ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS if (p, t) in m.GEN_TPS) ) @@ -119,33 +119,33 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # are included in the LNG supply tiers, so we don't need to worry about that. m.LNG_CONVERTED_PLANTS = Set( initialize=[ - 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', + 'Oahu_Kahe_K5', 'Oahu_Kahe_K6', 'Oahu_Kalaeloa_CC1_CC2', # used in some older models 'Oahu_Kalaeloa_CC1', 'Oahu_Kalaeloa_CC2', 'Oahu_Kalaeloa_CC3', 'Oahu_CC_383', 'Oahu_CC_152', 'Oahu_CT_100' ] ) - m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS, + m.LNG_In_Converted_Plants_Only = Constraint(m.LNG_GEN_TIMEPOINTS, rule=lambda m, g, tp: Constraint.Skip if g in m.LNG_CONVERTED_PLANTS else (m.GenFuelUseRate[g, tp, 'LNG'] == 0) ) - + # CODE BELOW IS DISABLED because we have abandoned the 'container' tier which cost - # more than LSFO, and because we would rather show the choice that is made if LNG + # more than LSFO, and because we would rather show the choice that is made if LNG # is more expensive (i.e., stick with LSFO) # NOTE: all the code below works together to force the model to meet an LNG quota - or try as - # hard as possible - if LNG has been activated and the variable cost is higher than LSFO. - # These constraints could potentially be replaced with simpler code that forces the power - # system to meet the LNG quota, but then that could be infeasible if there is not enough + # hard as possible - if LNG has been activated and the variable cost is higher than LSFO. + # These constraints could potentially be replaced with simpler code that forces the power + # system to meet the LNG quota, but then that could be infeasible if there is not enough # LNG-capable generation capacity to meet that quota. - + # # largest amount of LNG that might be consumed per year (should be at least # # equal to the amount that might be activated and left unused, but # # not too much bigger); this is 2 million tons per year * 52 MMBtu/ton # big_market_lng = 2e6 * 52 # MMbtu/year - + # # LNG converted plants must use LNG unless the supply is exhausted # # note: in this formulation, FuelConsumptionInMarket can be low, # # unless LNG_Has_Slack is zero, in which case all available fuel @@ -198,7 +198,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # m.LNG_GEN_TIMEPOINTS, # rule=Only_LNG_In_Converted_Plants_rule # ) - + # # If the 'container' tier is forced on, then # # force LNG-capable plants to run at max power, or up to the # # point where they exhaust all active LNG tiers. 
Combined with the @@ -229,7 +229,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # m.LNG_GEN_TIMEPOINTS, # rule=Force_Converted_Plants_On_rule # ) - + # # force consumption up to the limit if the 'container' tier is activated, # # because this tier sometimes costs more than oil, in which case it will # # be avoided without this rule. (this also models HECO's commitment to LNG in PSIP) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 6842d533c..7aa54861e 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -5,29 +5,29 @@ from util import get def define_arguments(argparser): - argparser.add_argument('--biofuel-limit', type=float, default=1.0, + argparser.add_argument('--biofuel-limit', type=float, default=1.0, help="Maximum fraction of power that can be obtained from biofuel in any period (default=1.0)") - argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, + argparser.add_argument('--biofuel-switch-threshold', type=float, default=1.0, help="RPS level at which all thermal plants switch to biofuels (0.0-1.0, default=1.0); use with --rps-allocation fuel_switch_at_high_rps") argparser.add_argument('--rps-activate', default='activate', - dest='rps_level', action='store_const', const='activate', + dest='rps_level', action='store_const', const='activate', help="Activate RPS (on by default).") - argparser.add_argument('--rps-deactivate', - dest='rps_level', action='store_const', const='deactivate', + argparser.add_argument('--rps-deactivate', + dest='rps_level', action='store_const', const='deactivate', help="Deactivate RPS.") - argparser.add_argument('--rps-no-new-renewables', - dest='rps_level', action='store_const', const='no_new_renewables', + argparser.add_argument('--rps-no-new-renewables', + dest='rps_level', action='store_const', const='no_new_renewables', help="Deactivate RPS and don't allow any new renewables except to replace existing capacity.") argparser.add_argument('--rps-no-new-wind', action='store_true', default=False, help="Don't allow any new wind capacity except to replace existing capacity.") argparser.add_argument('--rps-no-wind', action='store_true', default=False, help="Don't allow any new wind capacity or replacement of existing capacity.") argparser.add_argument( - '--rps-allocation', default=None, + '--rps-allocation', default=None, choices=[ - 'quadratic', - 'fuel_switch_by_period', 'fuel_switch_by_timeseries', - 'full_load_heat_rate', + 'quadratic', + 'fuel_switch_by_period', 'fuel_switch_by_timeseries', + 'full_load_heat_rate', 'split_commit', 'relaxed_split_commit', 'fuel_switch_at_high_rps', @@ -35,6 +35,11 @@ def define_arguments(argparser): help="Method to use to allocate power output among fuels. Default is fuel_switch_by_period for models " + "with unit commitment, full_load_heat_rate for models without." ) + argparser.add_argument('--rps-targets', nargs='*', default=None, + help="Targets to use for RPS, specified as --rps-targets year1 level1 year2 level2 ..., " + "where years are transition years and levels are fractions between 0 and 1. " + "If not specified, values from rps_targets.tab will be used." + ) # TODO: make this work with progressive hedging as follows: # add a variable indexed over all weather scenarios and all cost scenarios, @@ -43,12 +48,12 @@ def define_arguments(argparser): # May instead need to treat the RPS more like a limit on non-renewable production (as a fraction of loads)? # Designate the allocations as a first-stage variable. 
# Require each subproblem to work within its part of the allocation. Also require in each subproblem -# that the allocations across all weather scenarios (within each cost scenario) average out to match the +# that the allocations across all weather scenarios (within each cost scenario) average out to match the # actual target (when applying the scenario weights). # Then PHA will force all the scenarios to agree on how the target is allocated among them. # Could do the same with hydrogen storage: require average hydrogen stored across all scenarios # to be less than the size of the storage built. - + def define_components(m): """ @@ -56,10 +61,10 @@ def define_components(m): ################### # RPS calculation ################## - + m.f_rps_eligible = Param(m.FUELS, within=Binary) - m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: + m.RPS_ENERGY_SOURCES = Set(initialize=lambda m: [s for s in m.NON_FUEL_ENERGY_SOURCES if s != 'Battery'] + [f for f in m.FUELS if m.f_rps_eligible[f]] ) @@ -77,7 +82,7 @@ def rps_target_for_period_rule(m, p): # m.rps_fuel_limit = Param(default=float("inf"), mutable=True) m.rps_fuel_limit = Param(initialize=m.options.biofuel_limit, mutable=True) - # Define DispatchGenRenewableMW, which shows the amount of power produced + # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. define_DispatchGenRenewableMW(m) @@ -85,15 +90,15 @@ def rps_target_for_period_rule(m, p): m.RPSFuelPower = Expression(m.PERIODS, rule=lambda m, per: sum( m.DispatchGenRenewableMW[g, tp] * m.tp_weight[tp] - for g in m.FUEL_BASED_GENS + for g in m.FUEL_BASED_GENS for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) - # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra - # RPS-eligible power and burning it off in storage losses; on the other hand, + # Note: this rule ignores pumped hydro and batteries, so it could be gamed by producing extra + # RPS-eligible power and burning it off in storage losses; on the other hand, # it also neglects the (small) contribution from net flow of pumped hydro projects. - # TODO: incorporate pumped hydro into this rule, maybe change the target to refer to + # TODO: incorporate pumped hydro into this rule, maybe change the target to refer to # sum(getattr(m, component)[z, t] for z in m.LOAD_ZONES) for component in m.Zone_Power_Injections) # power production that can be counted toward the RPS each period @@ -117,7 +122,7 @@ def rps_target_for_period_rule(m, p): for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) - + if m.options.rps_level == 'activate': # we completely skip creating the constraint if the RPS is not activated. 
# this makes it easy for other modules to check whether there's an RPS in effect @@ -129,14 +134,14 @@ def rps_target_for_period_rule(m, p): elif m.options.rps_level == 'no_new_renewables': # prevent construction of any new exclusively-renewable projects, but allow # replacement of existing ones - # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could + # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could # be done with --biofuel-limit 0) m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES else Constraint.Skip ) - + wind_energy_sources = {'WND'} if m.options.rps_no_new_wind: # limit wind to existing capacity @@ -156,36 +161,36 @@ def rps_target_for_period_rule(m, p): # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy # Note: when the system really wants to use more biofuel, it is possible to "game" this limit by # cycling power through batteries, pumped storage, transmission lines or the hydrogen system to - # burn off some - # extra non-fuel energy, allowing more biofuel into the system. (This doesn't typically happen + # burn off some + # extra non-fuel energy, allowing more biofuel into the system. (This doesn't typically happen # with batteries due to high variable costs -- e.g., it has to cycle 4 kWh through a battery to - # consume 1 kWh of non-biofuel power, to allow 0.05 kWh of additional biofuel into the system. + # consume 1 kWh of non-biofuel power, to allow 0.05 kWh of additional biofuel into the system. # Even if this can save $0.5/kWh, if battery cycling costs $0.15/kWh, that means $0.60 extra to # save $0.025. It also doesn't happen in the hydrogen scenario, since storing intermittent power # directly as hydrogen can directly displace biofuel consumption. But it could happen if batteries # have low efficiency or low cycling cost, or if transmission losses are significant.) # One solution would be to only apply the RPS to the predefined load (not generation), but then # transmission and battery losses could be served by fossil fuels. - # Alternatively: limit fossil fuels to (1-rps) * standard loads + # Alternatively: limit fossil fuels to (1-rps) * standard loads # and limit biofuels to (1-bio)*standard loads. This would force renewables to be used for # all losses, which is slightly inaccurate. # TODO: fix the problem noted above; for now we don't worry too much because there are no # transmission losses, the cycling costs for batteries are too high and pumped storage is only # adopted on a small scale. - + m.RPS_Fuel_Cap = Constraint(m.PERIODS, rule = lambda m, per: m.RPSFuelPower[per] <= m.rps_fuel_limit * m.RPSTotalPower[per] ) def define_DispatchGenRenewableMW(m): - # Define DispatchGenRenewableMW, which shows the amount of power produced + # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. # This must be linear, because it may be used in RPS calculations. # This can get complex when a project uses multiple fuels and incremental - # heat rate curves. + # heat rate curves. 
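The battery-cycling arithmetic in the RPS_Fuel_Cap note above checks out; spelled out with the same figures:

    # cycling 4 kWh through a battery to burn off 1 kWh of non-biofuel power,
    # which lets 0.05 kWh of extra biofuel into the system (figures from the
    # note above)
    cycled_kwh = 4.0
    cycling_cost = 0.15          # $/kWh cycled
    extra_biofuel_kwh = 0.05
    biofuel_savings = 0.5        # $/kWh of biofuel admitted

    extra_cost = cycled_kwh * cycling_cost           # about $0.60
    saving = extra_biofuel_kwh * biofuel_savings     # about $0.025
    assert extra_cost > saving   # so gaming the cap this way is uneconomic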
if m.options.rps_allocation is None: if hasattr(m, 'FUEL_USE_SEGMENTS_FOR_GEN'): - # using heat rate curves and possibly startup fuel; + # using heat rate curves and possibly startup fuel; # have to do more advanced allocation of power to fuels m.options.rps_allocation = 'fuel_switch_by_period' else: @@ -193,7 +198,7 @@ def define_DispatchGenRenewableMW(m): m.options.rps_allocation = 'full_load_heat_rate' if m.options.verbose: print "Using {} method to allocate DispatchGenRenewableMW".format(m.options.rps_allocation) - + if m.options.rps_allocation == 'full_load_heat_rate': simple_DispatchGenRenewableMW(m) elif m.options.rps_allocation == 'quadratic': @@ -212,13 +217,13 @@ def define_DispatchGenRenewableMW(m): def simple_DispatchGenRenewableMW(m): # Allocate the power produced during each timepoint among the fuels. # When not using heat rate curves, this can be calculated directly from - # fuel usage and the full load heat rate. This also allows use of + # fuel usage and the full load heat rate. This also allows use of # multiple fuels in the same project at the same time. m.DispatchGenRenewableMW = Expression( m.FUEL_BASED_GEN_TPS, rule=lambda m, g, t: sum( - m.GenFuelUseRate[g, t, f] + m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g] if m.f_rps_eligible[f] ) @@ -230,74 +235,74 @@ def split_commit_DispatchGenRenewableMW(m): # This approach requires the utility to designate part of their capacity for # renewable production and part for non-renewable, and show how they commit # and dispatch each part. The current version allows fractional commitment to - # each mode, but we could use integer commitment variables to force full units + # each mode, but we could use integer commitment variables to force full units # into each mode (more physically meaningful, but unnecessarily restrictive and # harder to calculate; the current version may serve as a reasonable accounting # method for multi-fuel projects in a partial-RPS environment). 
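A numeric sketch of the simple allocation in simple_DispatchGenRenewableMW above, with hypothetical figures: power is credited to each fuel in proportion to its fuel use divided by the full-load heat rate:

    # a dual-fuel unit burning biodiesel alongside LSFO (values assumed)
    full_load_heat_rate = 10.0   # MMBtu/MWh
    fuel_use = {'Biodiesel': 80.0, 'LSFO': 120.0}   # MMBtu/hr by fuel
    rps_eligible = {'Biodiesel': True, 'LSFO': False}

    renewable_mw = sum(
        rate for f, rate in fuel_use.items() if rps_eligible[f]
    ) / full_load_heat_rate
    assert renewable_mw == 8.0   # of 20 MW total, 8 MW counts toward the RPS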
- + # TODO: limit this to projects that can use both renewable and non-renewable fuel - # TODO: force CommitGenRenewable == CommitGen when there's 100% RPS - # TODO: force DispatchGenRenewableMW == DispatchGen when there's 100% RPS + # TODO: force CommitGenRenewable == CommitGen when there's 100% RPS + # TODO: force DispatchGenRenewableMW == DispatchGen when there's 100% RPS # TODO: force CommitGenRenewable == 0 when there's 0% RPS # (these may not be needed: single-category projects will get dispatch forced to zero # in one category and forced up to total dispatch in another; non-renewable capacity # can't get committed in the 100% RPS due to non-zero min loads) - + # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # a portion of every startup and shutdown must be designated as renewable m.CommitGenRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.CommitGenRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.CommitGenRenewable[g, tp] <= m.CommitGen[g, tp] ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) m.ShutdownGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.ShutdownGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.ShutdownGenCapacityRenewable[g, tp] <= m.ShutdownGenCapacity[g, tp] ) # chain commitments, startup and shutdown for renewables m.Commit_StartupGenCapacity_ShutdownGenCapacity_Consistency_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: m.CommitGenRenewable[g, m.tp_previous[tp]] - + m.StartupGenCapacityRenewable[g, tp] - - m.ShutdownGenCapacityRenewable[g, tp] + + m.StartupGenCapacityRenewable[g, tp] + - m.ShutdownGenCapacityRenewable[g, tp] == m.CommitGenRenewable[g, tp] ) # must use committed capacity for renewable production m.Enforce_Dispatch_Upper_Limit_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.CommitGenRenewable[g, tp] ) # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) + >= + (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) * m.gen_min_load_fraction_TP[g, tp] ) # use standard heat rate calculations for renewable and non-renewable parts m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f in m.RPS_ENERGY_SOURCES - ) + ) >= 
m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * m.CommitGenRenewable[g, tp] @@ -305,12 +310,12 @@ def split_commit_DispatchGenRenewableMW(m): ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f not in m.RPS_ENERGY_SOURCES - ) + ) >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * (m.CommitGen[g, tp] - m.CommitGenRenewable[g, tp]) @@ -324,15 +329,15 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # then replaced by m.DispatchGenRenewableMW. # This means all startup fuel can be non-renewable, except when the RPS # is 100%. - + # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.DispatchGenRenewableMW_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) m.StartupGenCapacityRenewable = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, + m.StartupGenCapacityRenewable_Cap = Constraint(m.FUEL_BASED_GEN_TPS, rule = lambda m, g, tp: m.StartupGenCapacityRenewable[g, tp] <= m.StartupGenCapacity[g, tp] ) @@ -340,13 +345,13 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # can't dispatch non-renewable capacity below its lower limit m.Enforce_Dispatch_Lower_Limit_Non_Renewable = Constraint( m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + rule=lambda m, g, tp: (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) - >= - (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) + >= + (m.CommitGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) * m.gen_min_load_fraction_TP[g, tp] ) - + # rule=lambda m, g, t, intercept, incremental_heat_rate: ( # sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g]) >= # # Do the startup @@ -360,16 +365,16 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): # output to 180 MW but the plant is rated 185 MW.) # use standard heat rate calculations for renewable and non-renewable parts - # These set a lower bound for each type of fuel, as if we committed one slice of capacity + # These set a lower bound for each type of fuel, as if we committed one slice of capacity # for renewables and one slice for non-renewable, equal to the amount of power from each. 
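To see what these lower bounds do, here is a worked instance of the two-slice accounting with a single heat-rate segment; the intercept and incremental rate are hypothetical, and the constraints below apply the same form symbolically:

    # one piecewise segment: fuel >= startups + intercept*slice + ihr*slice
    intercept = 2.0      # MMBtu/hr per MW in the slice (assumed)
    ihr = 8.0            # incremental heat rate, MMBtu/MWh (assumed)

    dispatch_mw = 90.0
    renewable_mw = 60.0  # slice of output designated renewable (no startups here)

    renewable_fuel_min = (intercept + ihr) * renewable_mw                 # 600 MMBtu/hr
    fossil_fuel_min = (intercept + ihr) * (dispatch_mw - renewable_mw)    # 300 MMBtu/hr
    # i.e., fuel is accounted as if two blocks of 60 MW and 30 MW were
    # committed separately, one burning renewable fuel and one fossil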
m.ProjRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f in m.RPS_ENERGY_SOURCES - ) + ) >= m.StartupGenCapacityRenewable[g, tp] * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * m.DispatchGenRenewableMW[g, tp] @@ -377,12 +382,12 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): ) m.ProjNonRenewableFuelUseRate_Calculate = Constraint( m.GEN_TPS_FUEL_PIECEWISE_CONS_SET, - rule=lambda m, g, tp, intercept, incremental_heat_rate: + rule=lambda m, g, tp, intercept, incremental_heat_rate: sum( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] for f in m.FUELS_FOR_GEN[g] if f not in m.RPS_ENERGY_SOURCES - ) + ) >= (m.StartupGenCapacity[g, tp] - m.StartupGenCapacityRenewable[g, tp]) * m.gen_startup_fuel[g] / m.tp_duration_hrs[tp] + intercept * (m.DispatchGen[g, tp] - m.DispatchGenRenewableMW[g, tp]) @@ -395,7 +400,7 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( dimen=3, initialize=lambda m: [ - (g, tp, f) + (g, tp, f) for per in m.PERIODS if m.rps_target_for_period[per] == 1.0 for g in m.FUEL_BASED_GENS if (g, per) in m.GEN_PERIODS for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] @@ -406,17 +411,17 @@ def relaxed_split_commit_DispatchGenRenewableMW(m): m.FULL_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS, rule=lambda m, g, tp, f: m.GenFuelUseRate[g, tp, f] == 0.0 ) - + def fuel_switch_at_high_rps_DispatchGenRenewableMW(m): """ switch all plants to biofuel (and count toward RPS) if and only if rps is above threshold """ - + if m.options.rps_level == 'activate': # find all dispatch points for non-renewable fuels during periods with 100% RPS m.HIGH_RPS_GEN_FOSSIL_FUEL_DISPATCH_POINTS = Set( - dimen=3, + dimen=3, initialize=lambda m: [ - (g, tp, f) + (g, tp, f) for p in m.PERIODS if m.rps_target_for_period[p] >= m.options.biofuel_switch_threshold for g in m.FUEL_BASED_GENS if (g, p) in m.GEN_PERIODS for f in m.FUELS_FOR_GEN[g] if not m.f_rps_eligible[f] @@ -436,7 +441,7 @@ def rule(m, g, tp): m.DispatchGenRenewableMW = Expression(m.FUEL_BASED_GEN_TPS, rule=rule) else: m.DispatchGenRenewableMW = Expression( - m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, + m.FUEL_BASED_GEN_TPS, within=NonNegativeReals, rule=lambda m, g, tp: 0.0 ) @@ -450,16 +455,16 @@ def binary_by_period_DispatchGenRenewableMW(m): # This could be further simplified by creating a set of eligibility levels, # and choosing the amount to produce from each eligibility level (similar to the # renewable/non-renewable distinction here, but with a 50% renewable category) - + m.GEN_WITH_FUEL_ACTIVE_PERIODS = Set(dimen=2, initialize=lambda m: { - (g, pe) + (g, pe) for g in m.FUEL_BASED_GENS for pe in m.PERIODS if (g, m.TPS_IN_PERIOD[pe].first()) in m.FUEL_BASED_GEN_TPS }) - + # choose whether to run (only) on renewable fuels during each period m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_PERIODS, within=Binary) - + # force flag on or off when the RPS is simple (to speed computation) def rule(m, g, p): if m.rps_target_for_period[pe]==1.0: @@ -471,35 +476,35 @@ def rule(m, g, p): else: return Constraint.Skip m.Force_DispatchRenewableFlag = Constraint( - m.GEN_WITH_FUEL_ACTIVE_PERIODS, + m.GEN_WITH_FUEL_ACTIVE_PERIODS, rule=lambda m, g, pe: - (m.DispatchRenewableFlag[g, pe] == 0) + (m.DispatchRenewableFlag[g, pe] == 0) if 
(m.rps_target_for_period[pe]==0.0 or m.options.rps_level != 'activate') else ( - (m.DispatchRenewableFlag[g, pe] == 1) + (m.DispatchRenewableFlag[g, pe] == 1) if m.rps_target_for_period[pe]==1.0 else Constraint.Skip ) ) - + # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_period[tp]] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): if m.f_rps_eligible[f]: @@ -511,9 +516,9 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): # on total output from any power plant (that also clarifies dual analysis) big_fuel = 1.01 * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] return ( - m.GenFuelUseRate[g, tp, f] + m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_period[tp]] * big_fuel - <= + <= big_fuel ) m.Enforce_DispatchRenewableFlag = Constraint( @@ -522,17 +527,17 @@ def Enforce_DispatchRenewableFlag_rule(m, g, tp, f): def binary_by_timeseries_DispatchGenRenewableMW(m): m.GEN_WITH_FUEL_ACTIVE_TIMESERIES = Set(dimen=2, initialize=lambda m: { - (g, ts) + (g, ts) for g in m.FUEL_BASED_GENS for ts in m.TIMESERIES if (g, m.TPS_IN_TS[ts].first()) in m.FUEL_BASED_GEN_TPS }) - + # choose whether to run (only) on renewable fuels during each period m.DispatchRenewableFlag = Var(m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, within=Binary) - + # force flag on or off depending on RPS status (to speed computation) m.Force_DispatchRenewableFlag = Constraint( - m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, + m.GEN_WITH_FUEL_ACTIVE_TIMESERIES, rule=lambda m, g, ts: (m.DispatchRenewableFlag[g, ts] == 0) if m.rps_target_for_period[m.ts_period[ts]]==0.0 else ( @@ -540,38 +545,38 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): else Constraint.Skip ) ) - + # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: Constraint.Skip if m.f_rps_eligible[f] else ( # original code, rewritten to get numerical parts on rhs # m.GenFuelUseRate[g, tp, f] # <= # (1-m.DispatchRenewableFlag[g, m.tp_ts[tp]]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - m.GenFuelUseRate[g, tp, f] 
+ m.GenFuelUseRate[g, tp, f] + m.DispatchRenewableFlag[g, m.tp_ts[tp]] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] - <= + <= m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) ) @@ -581,32 +586,32 @@ def binary_by_timeseries_DispatchGenRenewableMW(m): def advanced2_DispatchGenRenewableMW(m): # choose whether to run (only) on renewable fuels during each timepoint m.DispatchRenewableFlag = Var(m.FUEL_BASED_GEN_TPS, within=Binary) - + # count amount of renewable power produced from project m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals) - + # don't overcount renewable power production m.Limit_DispatchGenRenewableMW = Constraint( - m.FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: m.DispatchGenRenewableMW[g, tp] <= m.DispatchGen[g, tp] ) # force the flag to be set during renewable timepoints m.Set_DispatchRenewableFlag = Constraint( - m.FUEL_BASED_GEN_TPS, + m.FUEL_BASED_GEN_TPS, rule=lambda m, g, tp: - m.DispatchGenRenewableMW[g, tp] - <= + m.DispatchGenRenewableMW[g, tp] + <= m.DispatchRenewableFlag[g, tp] * m.gen_capacity_limit_mw[g] ) - + # prevent use of non-renewable fuels during renewable timepoints m.Enforce_DispatchRenewableFlag = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: Constraint.Skip if m.f_rps_eligible[f] else ( - m.GenFuelUseRate[g, tp, f] - <= + m.GenFuelUseRate[g, tp, f] + <= (1-m.DispatchRenewableFlag[g, tp]) * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) ) @@ -618,18 +623,18 @@ def advanced1_DispatchGenRenewableMW(m): m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS, within=NonNegativeReals) # make sure this matches total production m.DispatchGenRenewableMW_Total = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == m.DispatchGen[g, tp] ) - + # choose a single fuel to use during each timestep m.DispatchFuelFlag = Var(m.GEN_TP_FUELS, within=Binary) m.DispatchFuelFlag_Total = Constraint( - m.FUEL_BASED_GEN_TPS, - rule=lambda m, g, tp: + m.FUEL_BASED_GEN_TPS, + rule=lambda m, g, tp: sum(m.DispatchFuelFlag[g, tp, f] for f in m.FUELS_FOR_GEN[g]) == 1 @@ -637,26 +642,26 @@ def advanced1_DispatchGenRenewableMW(m): # consume only the selected fuel and allocate all production to that fuel (big-M constraints) m.Allocate_Dispatch_Output = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.DispatchGenRenewableMW[g, tp, f] - <= + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: + m.DispatchGenRenewableMW[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] ) m.Allocate_Dispatch_Fuel = Constraint( - m.GEN_TP_FUELS, - rule=lambda m, g, tp, f: - m.GenFuelUseRate[g, tp, f] - <= + m.GEN_TP_FUELS, + rule=lambda m, g, tp, f: + m.GenFuelUseRate[g, tp, f] + <= m.DispatchFuelFlag[g, tp, f] * m.gen_capacity_limit_mw[g] * m.gen_full_load_heat_rate[g] ) - - # note: in cases where a project has a single fuel, the presolver should force + + # note: in cases where a project has a single fuel, the presolver should force # DispatchGenRenewableMW for that fuel to match DispatchGen, and possibly # eliminate the allocation constraints - + # possible simplifications: - # omit binary variables and big-m constraints if len(m.FUELS_FOR_GEN[p]) == 1 + # omit binary variables and big-m constraints if len(m.FUELS_FOR_GEN[p]) == 1 # (assign all production to the single fuel) # use m.GenFuelUseRate[g, t, f] / 
m.gen_full_load_heat_rate[g]
     #   for projects with no heat rate curve and no startup fuel
@@ -664,12 +669,12 @@ def advanced1_DispatchGenRenewableMW(m):
     # note: a continuous, quadratic version of this function can be created as follows:
     # - make DispatchFuelFlag a PercentFraction instead of Binary
     # - replace gen_capacity_limit_mw with GenCapacity in Allocate_Dispatch_Output
-    # - replace m.gen_capacity_limit_mw * m.gen_full_load_heat_rate with 
+    # - replace m.gen_capacity_limit_mw * m.gen_full_load_heat_rate with
     #   sum(m.GenFuelUseRate[g, t, f] for f in m.FUELS_FOR_GEN[g])
     #   in Allocate_Dispatch_Fuel (define this as an Expression in dispatch.py)
     # - replace <= with == in the allocation constraints
     # - drop the DispatchGenRenewableMW_Total constraint
-    
+
     # or this would also work:
     # m.DispatchGenRenewableMW = Var(m.GEN_TP_FUELS)
     # m.DispatchGenRenewableMW_Allocate = Constraint(
@@ -685,30 +690,30 @@ def advanced1_DispatchGenRenewableMW(m):
 
 def quadratic_DispatchGenRenewableMW(m):
     # choose how much power to obtain from renewables during each timepoint
     m.DispatchRenewableFraction = Var(m.FUEL_BASED_GEN_TPS, within=PercentFraction)
-    
+
     # count amount of renewable power produced from project
     m.DispatchGenRenewableMW = Var(m.FUEL_BASED_GEN_TPS, within=NonNegativeReals)
-    
+
     # don't overcount renewable power production
     m.Set_DispatchRenewableFraction = Constraint(
-        m.FUEL_BASED_GEN_TPS, 
+        m.FUEL_BASED_GEN_TPS,
         rule=lambda m, g, tp:
-            m.DispatchGenRenewableMW[g, tp] 
-            <= 
+            m.DispatchGenRenewableMW[g, tp]
+            <=
             m.DispatchRenewableFraction[g, tp] * m.DispatchGen[g, tp]
     )
     m.Enforce_DispatchRenewableFraction = Constraint(
-        m.FUEL_BASED_GEN_TPS, 
-        rule=lambda m, g, tp: 
+        m.FUEL_BASED_GEN_TPS,
+        rule=lambda m, g, tp:
             sum(
-                m.GenFuelUseRate[g, tp, f] 
-                for f in m.FUELS_FOR_GEN[g] 
+                m.GenFuelUseRate[g, tp, f]
+                for f in m.FUELS_FOR_GEN[g]
                 if m.f_rps_eligible[f]
             )
             >=
             m.DispatchRenewableFraction[g, tp] * sum(
-                m.GenFuelUseRate[g, tp, f] 
+                m.GenFuelUseRate[g, tp, f]
                 for f in m.FUELS_FOR_GEN[g]
             )
     )
@@ -719,13 +724,13 @@ def quadratic1_DispatchGenRenewableMW(m):
 
     # make sure this matches total production
     m.DispatchGenRenewableMW_Total = Constraint(
-        m.FUEL_BASED_GEN_TPS, 
-        rule=lambda m, g, tp: 
+        m.FUEL_BASED_GEN_TPS,
+        rule=lambda m, g, tp:
             sum(m.DispatchGenRenewableMW[g, tp, f] for f in m.FUELS_FOR_GEN[g])
             ==
             m.DispatchGen[g, tp]
     )
-    
+
     m.DispatchGenRenewableMW_Allocate = Constraint(
         m.GEN_TP_FUELS,
         rule = lambda m, g, t, f:
@@ -742,10 +747,16 @@ def load_inputs(m, switch_data, inputs_dir):
         filename=os.path.join(inputs_dir, 'fuels.tab'),
         select=('fuel', 'rps_eligible'),
         param=(m.f_rps_eligible,))
-    switch_data.load_aug(
-        optional=True,
-        filename=os.path.join(inputs_dir, 'rps_targets.tab'),
-        autoselect=True,
-        index=m.RPS_YEARS,
-        param=(m.rps_target,))
-
+    if m.options.rps_targets is None:
+        switch_data.load_aug(
+            optional=True,
+            filename=os.path.join(inputs_dir, 'rps_targets.tab'),
+            autoselect=True,
+            index=m.RPS_YEARS,
+            param=(m.rps_target,))
+    else:
+        # construct data from a target specified as 'year1 level1 year2 level2 ...'
+        iterator = iter(m.options.rps_targets)
+        rps_targets = {int(year): float(target) for year, target in zip(iterator, iterator)}
+        switch_data.data()['RPS_YEARS'] = {None: sorted(rps_targets.keys())}
+        switch_data.data()['rps_target'] = rps_targets

From a7c22a45332a799b81638361b60643efca5eab12 Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Sat, 14 Jul 2018 09:14:10 -1000
Subject: [PATCH 47/51] Add --psip-relax-after argument to allow transition
 from PSIP to optimal plan.
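
For reference, a minimal sketch of the year-gating this option is meant to
apply (the target tuples below are hypothetical; the real
(year, technology, quantity) lists are assembled in define_components):

    # Hypothetical PSIP targets; builds at or before --psip-relax-after stay
    # forced, later builds are left to the optimizer.
    technology_targets_psip = [(2020, 'OnshoreWind', 24.0), (2030, 'OnshoreWind', 200.0)]
    psip_relax_after = 2022.0  # assumed value of m.options.psip_relax_after

    psip_targets = [t for t in technology_targets_psip if t[0] <= psip_relax_after]
    assert psip_targets == [(2020, 'OnshoreWind', 24.0)]  # the 2030 target is relaxed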
---
 switch_model/hawaii/psip_2016_12.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py
index 3a4f50e9a..26d7354ed 100644
--- a/switch_model/hawaii/psip_2016_12.py
+++ b/switch_model/hawaii/psip_2016_12.py
@@ -16,6 +16,8 @@ def define_arguments(argparser):
         help="Use only the amount of renewables shown in PSIP plans, and no more (should be combined with --psip-relax).")
     argparser.add_argument('--force-build', nargs=3, default=None,
         help="Force construction of at least a certain quantity of a particular technology during certain years. Space-separated list of year, technology and quantity.")
+    argparser.add_argument('--psip-relax-after', type=float, default=None,
+        help="Follow the PSIP plan up to and including the specified year, then optimize construction in later years. Should be combined with --psip-force.")
 
 def is_renewable(tech):
     return any(txt in tech for txt in ("PV", "Wind", "Solar"))
@@ -234,7 +236,11 @@ def define_components(m):
             raise RuntimeError('You must use the lng_conversion module and set "--force-lng-tier none" to match the PSIP.')
 
     if psip:
-        technology_targets = technology_targets_definite + technology_targets_psip
+        if m.options.psip_relax_after is not None:
+            psip_targets = [t for t in technology_targets_psip if t[0] <= m.options.psip_relax_after]
+        else:
+            psip_targets = technology_targets_psip
+        technology_targets = technology_targets_definite + psip_targets
     else:
         technology_targets = technology_targets_definite
 
@@ -315,7 +321,7 @@ def Enforce_Technology_Target_rule(m, per, tech):
                 "Model will be infeasible.".format(tech, per)
             )
             return Constraint.Infeasible
-        elif psip:
+        elif psip and (m.options.psip_relax_after is None or per <= m.options.psip_relax_after):
             return (build == target)
         elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES:
             # only build the specified amount of renewables, no more

From 8ec9cea81a4d23540f1e606a423d2ca48be8d2ce Mon Sep 17 00:00:00 2001
From: Matthias Fripp
Date: Thu, 26 Jul 2018 13:25:41 -1000
Subject: [PATCH 48/51] Switch from cap_cost_scen_id to tech_scen_id and use
 separate specification of storage energy cost

---
 switch_model/hawaii/scenario_data.py | 31 +++++++++++++---------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py
index 88ceeace4..e2b80fbc2 100644
--- a/switch_model/hawaii/scenario_data.py
+++ b/switch_model/hawaii/scenario_data.py
@@ -130,12 +130,12 @@ def write_tables(**args):
     # double-check that arguments are valid
     cur = db_cursor()
     cur.execute(
-        'select * from generator_costs_by_year where cap_cost_scen_id = %(cap_cost_scen_id)s',
+        'select * from generator_costs_by_year where tech_scen_id = %(tech_scen_id)s',
         args
     )
     if len([r for r in cur]) == 0:
         print "================================================================"
-        print "WARNING: no records found in generator_costs_by_year for cap_cost_scen_id='{}'".format(args['cap_cost_scen_id'])
+        print "WARNING: no records found in generator_costs_by_year for tech_scen_id='{}'".format(args['tech_scen_id'])
         print "================================================================"
         time.sleep(2)
     del cur
@@ -155,7 +155,8 @@
         SELECT DISTINCT
             CONCAT_WS('_', load_zone, p.technology, nullif(site, 'na'), nullif(orientation, 'na'))
                 AS "GENERATION_PROJECT",
-            p.*
+            p.*,
+            g.tech_scen_id
         FROM project p
             JOIN generator_info g USING (technology)
             CROSS JOIN study_length
@@ -167,7 +168,7 @@ def 
write_tables(**args): ) -- projects that could be built during the study LEFT JOIN generator_costs_by_year c ON ( - c.cap_cost_scen_id = %(cap_cost_scen_id)s + c.tech_scen_id = g.tech_scen_id AND c.technology = g.technology AND (g.min_vintage_year IS NULL OR c.year >= g.min_vintage_year) AND c.year >= study_start @@ -175,14 +176,17 @@ def write_tables(**args): ) WHERE (e.project_id IS NOT NULL OR c.technology IS NOT NULL) AND p.load_zone in %(load_zones)s + AND g.tech_scen_id IN ('all', %(tech_scen_id)s) AND g.technology NOT IN %(exclude_technologies)s; DROP TABLE IF EXISTS study_generator_info; CREATE TEMPORARY TABLE study_generator_info AS SELECT DISTINCT g.* - FROM generator_info g JOIN study_projects p USING (technology); + FROM generator_info g JOIN study_projects p USING (tech_scen_id, technology); """.format(with_period_length), args) + # import pdb; pdb.set_trace() + ######################### # financials @@ -345,11 +349,6 @@ def write_tables(**args): # Some of these are actually single-fuel, but this approach is simpler than sorting # them out within each query, and it doesn't add any complexity to the model. - if args.get('connect_cost_per_mw_km', 0): - print( - "WARNING: ignoring connect_cost_per_mw_km specified in arguments; using" - "project.connect_cost_per_mw and generator_info.connect_cost_per_kw_generic instead." - ) if args.get('wind_capital_cost_escalator', 0.0) or args.get('pv_capital_cost_escalator', 0.0): # user supplied a non-zero escalator raise ValueError( @@ -389,7 +388,7 @@ def write_tables(**args): "GENERATION_PROJECT", load_zone AS gen_load_zone, technology AS gen_tech, - connect_cost_per_mw AS gen_connect_cost_per_mw, + spur_line_cost_per_mw + 1000 * substation_cost_per_kw AS gen_connect_cost_per_mw, max_capacity AS gen_capacity_limit_mw, unit_size as gen_unit_size, max_age_years as gen_max_age, @@ -440,14 +439,12 @@ def write_tables(**args): c.capital_cost_per_kw * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) AS gen_overnight_cost, - CASE WHEN i.gen_storage_efficiency IS NULL THEN NULL ELSE 0.0 END - AS gen_storage_energy_overnight_cost, - i.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) + c.capital_cost_per_kwh AS gen_storage_energy_overnight_cost, + c.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-i.base_year) AS gen_fixed_o_m, - i.min_vintage_year + i.min_vintage_year -- used for build_year filter below FROM study_generator_info i - JOIN generator_costs_by_year c USING (technology) - WHERE c.cap_cost_scen_id = %(cap_cost_scen_id)s + JOIN generator_costs_by_year c USING (technology, tech_scen_id) ORDER BY 1, 2 ) SELECT -- costs specified in proj_existing_builds From 9da47b95cacc55057684a02d92896e0004598e1f Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 26 Jul 2018 18:03:35 -1000 Subject: [PATCH 49/51] Save subcomponents of total system cost as part of standard outputs This is useful for decomposing cost on a large scale, e.g., into taxes, EV costs, transmission, etc. 
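
As a rough illustration, each component is discounted with the same per-period
factors used for SystemCost and then summed (the component name and numbers
below are invented for the example; the real factors come from
m.bring_annual_costs_to_base_year):

    # Hypothetical annual costs by period and base-year discount factors
    annual_costs = {'SomeAnnualCost': {2020: 100.0, 2030: 120.0}}
    to_base_year = {2020: 8.0, 2030: 5.0}  # stand-in for bring_annual_costs_to_base_year

    npv = {
        component: sum(cost[p] * to_base_year[p] for p in cost)
        for component, cost in annual_costs.items()
    }
    assert npv == {'SomeAnnualCost': 1400.0}  # 100*8 + 120*5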
--- switch_model/reporting/__init__.py | 34 ++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 949d14d8e..ecb68e053 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -45,7 +45,7 @@ def define_arguments(argparser): def write_table(instance, *indexes, **kwargs): # there must be a way to accept specific named keyword arguments and - # also an open-ended list of positional arguments (*indexes), but I + # also an open-ended list of positional arguments (*indexes), but I # don't know what that is. output_file = kwargs["output_file"] headings = kwargs["headings"] @@ -78,7 +78,6 @@ def format_row(row): w.writerows( # TODO: flatten x (unpack tuples) like Pyomo before calling values() # That may cause problems elsewhere though... - format_row(row=values(instance, *x)) for x in itertools.product(*indexes) ) @@ -109,6 +108,7 @@ def post_solve(instance, outdir): """ save_generic_results(instance, outdir, instance.options.sorted_output) save_total_cost_value(instance, outdir) + save_cost_components(instance, outdir) save_results(instance, outdir) @@ -140,6 +140,36 @@ def save_total_cost_value(instance, outdir): fh.write('{}\n'.format(value(instance.SystemCost))) +def save_cost_components(m, outdir): + """ + Save values for all individual components of total system cost on NPV basis. + """ + cost_dict = dict() + for annual_cost in m.Cost_Components_Per_Period: + cost = getattr(m, annual_cost) + # note: storing value() instead of the expression may save + # some memory while this function runs + cost_dict[annual_cost] = value(sum( + cost[p] * m.bring_annual_costs_to_base_year[p] + for p in m.PERIODS + )) + for tp_cost in m.Cost_Components_Per_TP: + cost = getattr(m, tp_cost) + cost_dict[tp_cost] = value(sum( + cost[t] * m.tp_weight_in_year[t] + * m.bring_annual_costs_to_base_year[m.tp_period[t]] + for t in m.TIMEPOINTS + )) + write_table( + m, + cost_dict.keys(), + output_file=os.path.join(outdir, "cost_components.tab"), + headings=('component', 'npv_cost'), + values=lambda m, c: (c, cost_dict[c]), + digits=16 + ) + + def save_results(instance, outdir): """ Save model solution for later reuse. From aae116d0de60e11c5c017e11bb203e74ce4542cb Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 26 Jul 2018 18:05:20 -1000 Subject: [PATCH 50/51] Add non-electricity part of vehicle fleet cost to system cost This ensures it will be included in overall evaluation, and will be important later if we switch ev_share to a decision variable. 
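
Roughly speaking, appending a component name to Cost_Components_Per_Period is
what makes it count toward SystemCost, and (after the previous patch) it will
also appear in cost_components.tab. A simplified sketch of that accounting,
with invented component names and values:

    # Every registered component contributes its annual cost to the objective.
    cost_components_per_period = ['SomeOtherAnnualCost']
    cost_components_per_period += ['ev_extra_annual_cost', 'ice_annual_fuel_cost']

    annual_cost = {
        'SomeOtherAnnualCost': 100.0,
        'ev_extra_annual_cost': 5.0,
        'ice_annual_fuel_cost': 12.0,
    }
    system_cost = sum(annual_cost[c] for c in cost_components_per_period)
    assert system_cost == 117.0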
--- switch_model/hawaii/ev_advanced.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py index aa2f29276..ffc921902 100644 --- a/switch_model/hawaii/ev_advanced.py +++ b/switch_model/hawaii/ev_advanced.py @@ -76,8 +76,8 @@ def rule(m): # add cost components to account for the vehicle miles traveled via EV or ICE # (not used because it interferes with calculation of cost per kWh for electricity) - # m.Cost_Components_Per_Period.append('ev_extra_annual_cost') - # m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') + m.Cost_Components_Per_Period.append('ev_extra_annual_cost') + m.Cost_Components_Per_Period.append('ice_annual_fuel_cost') # EV bid data -- total MW used by 100% EV fleet, for each zone, veh type, # bid number, timepoint From fc19cfee23039dc255d8d1615ca4ac17e8d4f021 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Tue, 31 Jul 2018 19:17:12 -1000 Subject: [PATCH 51/51] Change version to 2.0.0 --- switch_model/version.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/switch_model/version.py b/switch_model/version.py index b48ca99f7..3d80c09b1 100644 --- a/switch_model/version.py +++ b/switch_model/version.py @@ -2,8 +2,8 @@ # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. """ This file should only include the version. Do not import any packages or -modules in here because this file needs to be executed before SWITCH is +modules here because this file needs to be executed before SWITCH is installed and executed in environments that don't have any dependencies installed. """ -__version__='2.0.0b4' \ No newline at end of file +__version__='2.0.0'