From 53f447687279323ea9cc69a6f663dfd18ebe8c25 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 6 Aug 2018 08:26:29 -1000 Subject: [PATCH 01/38] Start 2.0.1 branch; calculate ev emissions in hawaii.ev_advanced; add --save-expression[s] flag to allow saving of expressions (or params, undocumented) --- switch_model/hawaii/ev_advanced.py | 36 ++++++++++++++++----- switch_model/reporting/__init__.py | 50 +++++++++++++++++++++++++----- switch_model/version.py | 2 +- 3 files changed, 73 insertions(+), 15 deletions(-) diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py index ffc921902..846885d84 100644 --- a/switch_model/hawaii/ev_advanced.py +++ b/switch_model/hawaii/ev_advanced.py @@ -52,25 +52,47 @@ def rule(m): for t in m.EV_TYPES ) ) - # calculate total fuel cost for ICE (non-EV) VMTs + + # calculate total fuel usage, cost and emissions for ICE (non-EV) vehicles motor_fuel_mmbtu_per_gallon = { # from https://www.eia.gov/Energyexplained/?page=about_energy_units "Motor_Gasoline": 0.120476, "Motor_Diesel": 0.137452 } + m.ice_annual_fuel_mmbtu = Param( + m.LOAD_ZONES, m.EV_TYPES, m.PERIODS, + initialize=lambda m, z, evt, p: + (1.0 - m.ev_share[z, p]) + * m.n_vehicles[z, evt, p] + * m.ice_gals_per_year[z, evt, p] + * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, evt, p]] + ) + # non-EV fuel cost if hasattr(m, "rfm_supply_tier_cost"): ice_fuel_cost_func = lambda m, z, p, f: m.rfm_supply_tier_cost[m.zone_rfm[z, f], p, 'base'] else: ice_fuel_cost_func = lambda m, z, p, f: m.fuel_cost[z, f, p] m.ice_annual_fuel_cost = Param(m.PERIODS, initialize=lambda m, p: sum( - (1.0 - m.ev_share[z, p]) - * m.n_vehicles[z, t, p] - * m.ice_gals_per_year[z, t, p] - * motor_fuel_mmbtu_per_gallon[m.ice_fuel[z, t, p]] - * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, t, p]) + m.ice_annual_fuel_mmbtu[z, evt, p] + * ice_fuel_cost_func(m, z, p, m.ice_fuel[z, evt, p]) for z in m.LOAD_ZONES - for t in m.EV_TYPES + for evt in m.EV_TYPES + ) + ) + # non-EV annual 
emissions (currently only used for reporting via + # --save-expression ice_annual_emissions + # TODO: find a way to add this to the AnnualEmissions expression (maybe); + # at present, this doesn't affect the system emissions or emission cost + m.ice_annual_emissions = Param(m.PERIODS, initialize = lambda m, p: + sum( + m.ice_annual_fuel_mmbtu[z, evt, p] + * ( + m.f_co2_intensity[m.ice_fuel[z, evt, p]] + + m.f_upstream_co2_intensity[m.ice_fuel[z, evt, p]] + ) + for z in m.LOAD_ZONES + for evt in m.EV_TYPES ) ) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index ecb68e053..fb431e4e8 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -25,7 +25,7 @@ import csv import itertools import cPickle as pickle -from pyomo.environ import value, Var +from pyomo.environ import value, Var, Expression from switch_model.utilities import make_iterable csv.register_dialect( @@ -42,6 +42,10 @@ def define_arguments(argparser): "--sorted-output", default=False, action='store_true', dest='sorted_output', help='Write generic variable result values in sorted order') + argparser.add_argument( + "--save-expressions", "--save-expression", dest="save_expressions", nargs='+', default=['none'], + help="List of expressions to save in addition to variables; can also be 'all' or 'none'." 
+ ) def write_table(instance, *indexes, **kwargs): # there must be a way to accept specific named keyword arguments and @@ -113,7 +117,16 @@ def post_solve(instance, outdir): def save_generic_results(instance, outdir, sorted_output): - for var in instance.component_objects(Var): + components = list(instance.component_objects(Var)) + # add Expression objects that should be saved, if any + if instance.options.save_expressions == ['none']: + pass + elif instance.options.save_expressions == ['all']: + components += list(instance.component_objects(Expression)) + else: + components += [getattr(instance, c) for c in instance.options.save_expressions] + + for var in components: output_file = os.path.join(outdir, '%s.tab' % var.name) with open(output_file, 'wb') as fh: writer = csv.writer(fh, dialect='ampl-tab') @@ -125,14 +138,37 @@ def save_generic_results(instance, outdir, sorted_output): [var.name]) # Results are saved in a random order by default for # increased speed. Sorting is available if wanted. - for key, obj in (sorted(var.items()) - if sorted_output - else var.items()): - writer.writerow(tuple(make_iterable(key)) + (obj.value,)) + items = sorted(var.items()) if sorted_output else var.items() + for key, obj in items: + writer.writerow(tuple(make_iterable(key)) + (get_value(obj),)) else: # single-valued variable writer.writerow([var.name]) - writer.writerow([value(obj)]) + writer.writerow([get_value(obj)]) + +def get_value(obj): + """ + Retrieve value of one element of a Variable or Expression, converting + division-by-zero to nan and uninitialized values to None. + """ + try: + val = value(obj) + except ZeroDivisionError: + # diagnostic expressions sometimes have 0 denominator, + # e.g., AverageFuelCosts for unused fuels; + val = float("nan") + except ValueError as e: + # If variables are not used in constraints or the + # objective function, they will never get values, and + # give a ValueError at this point. 
+ # Note: for variables this could instead use 0 if allowed, or + # otherwise the closest bound. + if getattr(obj, 'value', 0) is None: + val = None + else: + # Caught some other ValueError + raise + return val def save_total_cost_value(instance, outdir): diff --git a/switch_model/version.py b/switch_model/version.py index 3d80c09b1..292a020e0 100644 --- a/switch_model/version.py +++ b/switch_model/version.py @@ -6,4 +6,4 @@ installed and executed in environments that don't have any dependencies installed. """ -__version__='2.0.0' +__version__='2.0.1' From 5cf309ea7d0b311d21c2014f324e8043ccd921d8 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 9 Aug 2018 15:53:10 -1000 Subject: [PATCH 02/38] Improve detection of missing tech_scen_id in back-end database. --- switch_model/hawaii/scenario_data.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index e2b80fbc2..8188372a1 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -129,15 +129,16 @@ def write_tables(**args): # double-check that arguments are valid cur = db_cursor() - cur.execute( - 'select * from generator_costs_by_year where tech_scen_id = %(tech_scen_id)s', - args - ) - if len([r for r in cur]) == 0: - print "================================================================" - print "WARNING: no records found in generator_costs_by_year for tech_scen_id='{}'".format(args['tech_scen_id']) - print "================================================================" - time.sleep(2) + for table in ['generator_costs_by_year', 'generator_info']: + cur.execute( + 'select * from {} where tech_scen_id = %(tech_scen_id)s limit 1'.format(table), + args + ) + if len(list(cur)) == 0: + print "================================================================" + print "WARNING: no records found in {} for tech_scen_id='{}'".format(table, args['tech_scen_id']) + 
print "================================================================" + time.sleep(2) del cur ######################### From 2177a3d239a20ec00e8f7cc67f9fd0e3b72b2659 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 9 Aug 2018 15:55:04 -1000 Subject: [PATCH 03/38] Report models with solver warnings and use valid solutions if available --- switch_model/solve.py | 273 +++++++++++++++++++++----------------- switch_model/utilities.py | 81 +++++++---- 2 files changed, 206 insertions(+), 148 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 283c76484..3804a73a2 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -9,7 +9,7 @@ import pyomo.version from switch_model.utilities import ( - create_model, _ArgumentParser, Logging, StepTimer, make_iterable + create_model, _ArgumentParser, StepTimer, make_iterable, LogOutput, warn ) from switch_model.upgrade import do_inputs_need_upgrade, upgrade_inputs @@ -35,124 +35,124 @@ def debug(type, value, tb): sys.excepthook = debug # Write output to a log file if logging option is specified - stdout_copy = sys.stdout # make a copy of current sys.stdout to return to eventually - if pre_module_options.log_run_to_file: - logging = Logging(pre_module_options.logs_dir) - print "logging run to " + str(logging.log_file_path) - sys.stdout = logging # assign private class to sys.stdout + logs_dir = pre_module_options.logs_dir else: - pass - - # Look out for outdated inputs. This has to happen before modules.txt is - # parsed to avoid errors from incompatible files. - parser = _ArgumentParser(allow_abbrev=False, add_help=False) - add_module_args(parser) - module_options = parser.parse_known_args(args=args)[0] - if(os.path.exists(module_options.inputs_dir) and - do_inputs_need_upgrade(module_options.inputs_dir)): - do_upgrade = query_yes_no( - ("Warning! Your inputs directory needs to be upgraded. " - "Do you want to auto-upgrade now? 
We'll keep a backup of " - "this current version.")) - if do_upgrade: - upgrade_inputs(module_options.inputs_dir) - else: - print "Inputs need upgrade. Consider `switch upgrade --help`. Exiting." - sys.stdout = stdout_copy - return -1 - - # build a module list based on configuration options, and add - # the current module (to register define_arguments callback) - modules = get_module_list(args) - - # Patch pyomo if needed, to allow reconstruction of expressions. - # This must be done before the model is constructed. - patch_pyomo() - - # Define the model - model = create_model(modules, args=args) - - # Add any suffixes specified on the command line (usually only iis) - add_extra_suffixes(model) - - # return the model as-is if requested - if return_model and not return_instance: - return model - - if model.options.reload_prior_solution: - if not os.path.isdir(model.options.outputs_dir): - raise IOError("Specified outputs directory for solution exploration does not exist.") - - # get a list of modules to iterate through - iterate_modules = get_iteration_list(model) + logs_dir = None # disables logging + + with LogOutput(logs_dir): + + # Look out for outdated inputs. This has to happen before modules.txt is + # parsed to avoid errors from incompatible files. + parser = _ArgumentParser(allow_abbrev=False, add_help=False) + add_module_args(parser) + module_options = parser.parse_known_args(args=args)[0] + if(os.path.exists(module_options.inputs_dir) and + do_inputs_need_upgrade(module_options.inputs_dir)): + do_upgrade = query_yes_no( + ("Warning! Your inputs directory needs to be upgraded. " + "Do you want to auto-upgrade now? We'll keep a backup of " + "this current version.")) + if do_upgrade: + upgrade_inputs(module_options.inputs_dir) + else: + print "Inputs need upgrade. Consider `switch upgrade --help`. Exiting." 
+ stop_logging_output() + return -1 + + # build a module list based on configuration options, and add + # the current module (to register define_arguments callback) + modules = get_module_list(args) + + # Patch pyomo if needed, to allow reconstruction of expressions. + # This must be done before the model is constructed. + patch_pyomo() + + # Define the model + model = create_model(modules, args=args) + + # Add any suffixes specified on the command line (usually only iis) + add_extra_suffixes(model) + + # return the model as-is if requested + if return_model and not return_instance: + return model + + if model.options.reload_prior_solution: + if not os.path.isdir(model.options.outputs_dir): + raise IOError("Specified outputs directory for solution exploration does not exist.") + + # get a list of modules to iterate through + iterate_modules = get_iteration_list(model) + + if model.options.verbose: + print "\n=======================================================================" + print "SWITCH model created in {:.2f} s.\nArguments:".format(timer.step_time()) + print ", ".join(k+"="+repr(v) for k, v in model.options.__dict__.items() if v) + print "Modules:\n"+", ".join(m for m in modules) + if iterate_modules: + print "Iteration modules:", iterate_modules + print "=======================================================================\n" + print "Loading inputs..." 
+ + # create an instance (also reports time spent reading data and loading into model) + instance = model.load_inputs() + + #### Below here, we refer to instance instead of model #### + + instance.pre_solve() + if instance.options.verbose: + print "Total time spent constructing model: {:.2f} s.\n".format(timer.step_time()) - if model.options.verbose: - print "\n=======================================================================" - print "SWITCH model created in {:.2f} s.\nArguments:".format(timer.step_time()) - print ", ".join(k+"="+repr(v) for k, v in model.options.__dict__.items() if v) - print "Modules:\n"+", ".join(m for m in modules) - if iterate_modules: - print "Iteration modules:", iterate_modules - print "=======================================================================\n" - print "Loading inputs..." - - # create an instance (also reports time spent reading data and loading into model) - instance = model.load_inputs() - - #### Below here, we refer to instance instead of model #### - - instance.pre_solve() - if instance.options.verbose: - print "Total time spent constructing model: {:.2f} s.\n".format(timer.step_time()) - - # return the instance as-is if requested - if return_instance: - if return_model: - return (model, instance) - else: - return instance + # return the instance as-is if requested + if return_instance: + if return_model: + return (model, instance) + else: + return instance - if instance.options.reload_prior_solution: - reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) - if instance.options.verbose: - print( - 'Loaded previous results into model instance in {:.2f} s.' 
- .format(timer.step_time()) - ) - else: - # make sure the outputs_dir exists (used by some modules during iterate) - # use a race-safe approach in case this code is run in parallel - try: - os.makedirs(instance.options.outputs_dir) - except OSError: - # directory probably exists already, but double-check - if not os.path.isdir(instance.options.outputs_dir): - raise - - # solve the model (reports time for each step as it goes) - if iterate_modules: + if instance.options.reload_prior_solution: + reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) if instance.options.verbose: - print "Iterating model..." - iterate(instance, iterate_modules) + print( + 'Loaded previous results into model instance in {:.2f} s.' + .format(timer.step_time()) + ) else: - results = solve(instance) - if instance.options.verbose: - print "Optimization termination condition was {}.\n".format( - results.solver.termination_condition) + # make sure the outputs_dir exists (used by some modules during iterate) + # use a race-safe approach in case this code is run in parallel + try: + os.makedirs(instance.options.outputs_dir) + except OSError: + # directory probably exists already, but double-check + if not os.path.isdir(instance.options.outputs_dir): + raise + + # solve the model (reports time for each step as it goes) + if iterate_modules: + if instance.options.verbose: + print "Iterating model..." + iterate(instance, iterate_modules) + else: + results = solve(instance) + if instance.options.verbose: + print "" + print results.solver.message + print "Optimization termination condition was {}.".format( + results.solver.termination_condition) + print "" - if instance.options.verbose: - timer.step_time() # restart counter for next step + if instance.options.verbose: + timer.step_time() # restart counter for next step - # report/save results - if instance.options.verbose: - print "Executing post solve functions..." 
- instance.post_solve() - if instance.options.verbose: - print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) + # report/save results + if instance.options.verbose: + print "Executing post solve functions..." + instance.post_solve() + if instance.options.verbose: + print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) - # return stdout to original - sys.stdout = stdout_copy + # end of LogOutput block if instance.options.interact or instance.options.reload_prior_solution: m = instance # present the solved model as 'm' for convenience @@ -602,19 +602,15 @@ def solve(model): TempfileManager.tempdir = model.options.tempdir results = model.solver_manager.solve(model, opt=model.solver, **solver_args) + #import pdb; pdb.set_trace() if model.options.verbose: print "Solved model. Total time spent in solver: {:2f} s.".format(timer.step_time()) - # Only return if the model solved correctly, otherwise throw a useful error - if(results.solver.status in {SolverStatus.ok, SolverStatus.warning} and - results.solver.termination_condition == TerminationCondition.optimal): - # Cache a copy of the results object, to allow saving and restoring model - # solutions later. 
- model.last_results = results - # Successful solution, return results - return results - elif (results.solver.termination_condition == TerminationCondition.infeasible): + # Treat infeasibility as an error, rather than trying to load and save the results + # (note: in this case, results.solver.status may be SolverStatus.warning instead of + # SolverStatus.error) + if (results.solver.termination_condition == TerminationCondition.infeasible): if hasattr(model, "iis"): print "Model was infeasible; irreducibly inconsistent set (IIS) returned by solver:" print "\n".join(sorted(c.name for c in model.iis)) @@ -623,15 +619,48 @@ def solve(model): print "more information may be available by setting the appropriate flags in the " print 'solver_options_string and calling this script with "--suffixes iis".' raise RuntimeError("Infeasible model") - else: - print "Solver terminated abnormally." + + # Raise an error if the solver failed to produce a solution + # Note that checking for results.solver.status in {SolverStatus.ok, + # SolverStatus.warning} is not enough because with a warning there will + # sometimes be a solution and sometimes not. + # Note: the results object originally contains values for model components + # in results.solution.variable, etc., but pyomo.solvers.solve erases it via + # result.solution.clear() after calling model.solutions.load_from() with it. + # load_from() loads values into the model.solutions._entry, so we check there. + # (See pyomo.PyomoModel.ModelSolutions.add_solution() for the code that + # actually creates _entry). + # Another option might be to check that model.solutions[-1].status (previously + # result.solution.status, but also cleared) is in + # pyomo.opt.SolutionStatus.['optimal', 'bestSoFar', 'feasible', 'globallyOptimal', 'locallyOptimal'], + # but this seems pretty foolproof (if undocumented). + if len(model.solutions[-1]._entry['variable']) == 0: + # no solution returned + print "Solver terminated without a solution." 
print " Solver Status: ", results.solver.status + print " Solution Status: ", model.solutions[-1].status print " Termination Condition: ", results.solver.termination_condition if model.options.solver == 'glpk' and results.solver.termination_condition == TerminationCondition.other: print "Hint: glpk has been known to classify infeasible problems as 'other'." raise RuntimeError("Solver failed to find an optimal solution.") - # no default return, because we'll never reach here + # Report any warnings; these are written to stderr so users can find them in + # error logs (e.g. on HPC systems). These can occur, e.g., if solver reaches + # time limit or iteration limit but still returns a valid solution + if results.solver.status == SolverStatus.warning: + warn( + "Solver terminated with warning.\n" + + " Solution Status: {}\n".format(model.solutions[-1].status) + + " Termination Condition: {}".format(results.solver.termination_condition) + ) + + ### process and return solution ### + + # Cache a copy of the results object, to allow saving and restoring model + # solutions later. + model.last_results = results + return results + # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 diff --git a/switch_model/utilities.py b/switch_model/utilities.py index 0f5374d51..0d1add938 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -535,38 +535,67 @@ def approx_equal(a, b, tolerance=0.01): def default_solver(): return pyomo.opt.SolverFactory('glpk') - -class Logging: +def warn(message): """ - Assign standard output and a log file as output destinations. This is accomplished by assigning this class - to sys.stdout. + Send warning message to sys.stderr. + Unlike warnings.warn, this does not add the current line of code to the message. 
""" - def __init__(self, logs_dir): - # Make logs directory if class is initialized - if not os.path.exists(logs_dir): - os.makedirs(logs_dir) - - # Assign sys.stdout and a log file as locations to write to - self.terminal = sys.stdout - self.log_file_path = os.path.join(logs_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".log") - self.log_file = open(self.log_file_path, "w", buffering=1) + sys.stderr.write("WARNING: " + message + '\n') - def __getattr__(self, attr): +class TeeStream: + """ + Virtual stream that writes output to both stream1 and stream2. Attributes + of stream1 will be reported to callers if needed. For example, specifying + `sys.stdout=TeeStream(sys.stdout, log_file_handle)` will copy + output destined for sys.stdout to log_file_handle as well. + """ + def __init__(self, stream1, stream2): + self.stream1 = stream1 + self.stream2 = stream2 + def __getattr__(self, *args, **kwargs): """ - Default to sys.stdout attributes when calling attributes for this class. - This is here to prevent unintended consequences for code that assumes sys.stdout is an object with its own + Provide stream1 attributes when attributes are requested for this class. + This supports code that assumes sys.stdout is an object with its own methods, etc. """ - return getattr(self.terminal, attr) - - def write(self, message): - self.terminal.write(message) - self.log_file.write(message) - - def flush(self): - self.terminal.flush() - self.log_file.flush() - + return getattr(self.stream1, *args, **kwargs) + def write(self, *args, **kwargs): + self.stream1.write(*args, **kwargs) + self.stream2.write(*args, **kwargs) + def flush(self, *args, **kwargs): + self.stream1.flush(*args, **kwargs) + self.stream2.flush(*args, **kwargs) + +class LogOutput(object): + """ + Copy output sent to stdout or stderr to a log file in the specified directory. + Takes no action if directory is None. Log file is named based on the current + date and time. 
Directory will be created if needed, and file will be overwritten + if it already exists (unlikely). + """ + def __init__(self, logs_dir): + self.logs_dir = logs_dir + def __enter__(self): + """ start copying output to log file """ + if self.logs_dir is not None: + if not os.path.exists(self.logs_dir): + os.makedirs(self.logs_dir) + log_file_path = os.path.join( + self.logs_dir, + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + ".log" + ) + self.log_file = open(log_file_path, "w", buffering=1) + self.stdout = sys.stdout + self.stderr = sys.stderr + sys.stdout = TeeStream(sys.stdout, self.log_file) + sys.stderr = TeeStream(sys.stderr, self.log_file) + print "logging output to " + str(log_file_path) + def __exit__(self, type, value, traceback): + """ restore original output streams and close log file """ + if self.logs_dir is not None: + sys.stdout = self.stdout + sys.stderr = self.stderr + self.log_file.close() def iteritems(obj): """ Iterator of key, value pairs for obj; From 55614ccd928ba85621bcd94f0c7f7c4e6e45cbbe Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 9 Aug 2018 20:58:27 -1000 Subject: [PATCH 04/38] Allow deactivation of hydrogen module via --no-hydrogen flag --- switch_model/hawaii/hydrogen.py | 61 ++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index e23e18c14..0a3af6ea0 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -3,13 +3,20 @@ from switch_model.financials import capital_recovery_factor as crf def define_arguments(argparser): - argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], + argparser.add_argument('--hydrogen-reserve-types', nargs='+', default=['spinning'], help= "Type(s) of reserves to provide from hydrogen infrastructure (e.g., 'contingency regulation'). " "Specify 'none' to disable." 
) + argparser.add_argument('--no-hydrogen', action='store_true', default=False, + help="Don't allow construction of any hydrogen infrastructure." + ) def define_components(m): + if not m.options.no_hydrogen: + define_hydrogen_components(m) + +def define_hydrogen_components(m): # electrolyzer details m.hydrogen_electrolyzer_capital_cost_per_mw = Param() @@ -18,7 +25,7 @@ def define_components(m): m.hydrogen_electrolyzer_kg_per_mwh = Param() # assumed to deliver H2 at enough pressure for liquifier and daily buffering m.hydrogen_electrolyzer_life_years = Param() m.BuildElectrolyzerMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.ElectrolyzerCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: + m.ElectrolyzerCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum(m.BuildElectrolyzerMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) m.RunElectrolyzerMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.ProduceHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: @@ -36,13 +43,13 @@ def define_components(m): m.hydrogen_liquifier_mwh_per_kg = Param() m.hydrogen_liquifier_life_years = Param() m.BuildLiquifierKgPerHour = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # capacity to build, measured in kg/hour of throughput - m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: + m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) m.LiquifyHydrogenKgPerHour = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.LiquifyHydrogenMW = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_mwh_per_kg ) - + # storage tank details m.liquid_hydrogen_tank_capital_cost_per_kg = Param() m.liquid_hydrogen_tank_minimum_size_kg = Param(default=0.0) @@ -63,7 +70,7 @@ def 
define_components(m): m.hydrogen_fuel_cell_mwh_per_kg = Param() m.hydrogen_fuel_cell_life_years = Param() m.BuildFuelCellMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) - m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: + m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) m.DispatchFuelCellMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.ConsumeHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: @@ -71,19 +78,19 @@ def define_components(m): ) # hydrogen mass balances - # note: this allows for buffering of same-day production and consumption + # note: this allows for buffering of same-day production and consumption # of hydrogen without ever liquifying it m.Hydrogen_Conservation_of_Mass_Daily = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts] - == + == m.ts_duration_of_tp[ts] * sum( - m.ProduceHydrogenKgPerHour[z, tp] - m.ConsumeHydrogenKgPerHour[z, tp] + m.ProduceHydrogenKgPerHour[z, tp] - m.ConsumeHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts] ) ) m.Hydrogen_Conservation_of_Mass_Annual = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum( - (m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts]) + (m.StoreLiquidHydrogenKg[z, ts] - m.WithdrawLiquidHydrogenKg[z, ts]) * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p] ) == 0 @@ -102,22 +109,22 @@ def define_components(m): m.Set_BuildAnyLiquidHydrogenTank_Flag = Constraint(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 else ( - m.BuildLiquidHydrogenTankKg[z, p] - <= + m.BuildLiquidHydrogenTankKg[z, p] + <= 1000 * m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg ) ) m.Build_Minimum_Liquid_Hydrogen_Tank = Constraint(m.LOAD_ZONES, m.PERIODS, 
rule=lambda m, z, p: Constraint.Skip if m.liquid_hydrogen_tank_minimum_size_kg == 0.0 else ( - m.BuildLiquidHydrogenTankKg[z, p] - >= + m.BuildLiquidHydrogenTankKg[z, p] + >= m.BuildAnyLiquidHydrogenTank[z, p] * m.liquid_hydrogen_tank_minimum_size_kg ) ) # maximum amount that hydrogen fuel cells can contribute to system reserves - # Note: we assume we can't use fuel cells for reserves unless we've also built at least half + # Note: we assume we can't use fuel cells for reserves unless we've also built at least half # as much electrolyzer capacity and a tank that can provide the reserves for 12 hours # (this is pretty arbitrary, but avoids just installing a fuel cell as a "free" source of reserves) m.HydrogenFuelCellMaxReservePower = Var(m.LOAD_ZONES, m.TIMEPOINTS) @@ -139,17 +146,17 @@ def define_components(m): # how much extra power could hydrogen equipment produce or absorb on short notice (for reserves) m.HydrogenSlackUp = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.RunElectrolyzerMW[z, t] + m.RunElectrolyzerMW[z, t] + m.LiquifyHydrogenMW[z, t] + m.HydrogenFuelCellMaxReservePower[z, t] - m.DispatchFuelCellMW[z, t] ) m.HydrogenSlackDown = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: - m.ElectrolyzerCapacityMW[z, m.tp_period[t]] - m.RunElectrolyzerMW[z, t] + m.ElectrolyzerCapacityMW[z, m.tp_period[t]] - m.RunElectrolyzerMW[z, t] # ignore liquifier potential since it's small and this is a low-value reserve product + m.DispatchFuelCellMW[z, t] ) - + # there must be enough storage to hold _all_ the production each period (net of same-day consumption) # note: this assumes we cycle the system only once per year (store all energy, then release all energy) # alternatives: allow monthly or seasonal cycling, or directly model the whole year with inter-day linkages @@ -157,7 +164,7 @@ def define_components(m): sum(m.StoreLiquidHydrogenKg[z, ts] * m.ts_scale_to_year[ts] for ts in m.TS_IN_PERIOD[p]) <= m.LiquidHydrogenTankCapacityKg[z, p] ) 
- + # add electricity consumption and production to the zonal energy balance m.Zone_Power_Withdrawals.append('RunElectrolyzerMW') m.Zone_Power_Withdrawals.append('LiquifyHydrogenMW') @@ -190,20 +197,20 @@ def define_components(m): ) m.Cost_Components_Per_TP.append('HydrogenVariableCost') m.Cost_Components_Per_Period.append('HydrogenFixedCostAnnual') - + # Register with spinning reserves if it is available if [rt.lower() for rt in m.options.hydrogen_reserve_types] != ['none']: # Register with spinning reserves if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # calculate available slack from hydrogen equipment m.HydrogenSlackUpForArea = Expression( - m.BALANCING_AREA_TIMEPOINTS, + m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, t: sum(m.HydrogenSlackUp[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) ) m.HydrogenSlackDownForArea = Expression( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, b, t: sum(m.HydrogenSlackDown[z, t] for z in m.ZONES_IN_BALANCING_AREA[b]) ) if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): @@ -223,16 +230,16 @@ def define_components(m): ) # constrain reserve provision within available slack m.Limit_HydrogenSpinningReserveUp = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.HydrogenSpinningReserveUp[rt, ba, tp] for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES ) <= m.HydrogenSlackUpForArea[ba, tp] ) m.Limit_HydrogenSpinningReserveDown = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.HydrogenSpinningReserveDown[rt, ba, tp] for rt in m.HYDROGEN_SPINNING_RESERVE_TYPES @@ -252,7 +259,7 @@ def define_components(m): def load_inputs(mod, switch_data, inputs_dir): """ - Import hydrogen data from a .dat file. + Import hydrogen data from a .dat file. TODO: change this to allow multiple storage technologies. 
""" switch_data.load(filename=os.path.join(inputs_dir, 'hydrogen.dat')) From 1d89c84583d72ce8b8b5a03987742eb743e0a535 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 9 Aug 2018 21:04:35 -1000 Subject: [PATCH 05/38] Carry --no-hydrogen argument through to load_inputs --- switch_model/hawaii/hydrogen.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index 0a3af6ea0..998f028f0 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -15,7 +15,7 @@ def define_arguments(argparser): def define_components(m): if not m.options.no_hydrogen: define_hydrogen_components(m) - + def define_hydrogen_components(m): # electrolyzer details @@ -257,9 +257,10 @@ def define_hydrogen_components(m): m.Spinning_Reserve_Down_Provisions.append('HydrogenSlackDownForArea') -def load_inputs(mod, switch_data, inputs_dir): +def load_inputs(m, switch_data, inputs_dir): """ Import hydrogen data from a .dat file. TODO: change this to allow multiple storage technologies. """ - switch_data.load(filename=os.path.join(inputs_dir, 'hydrogen.dat')) + if not m.options.no_hydrogen: + switch_data.load(filename=os.path.join(inputs_dir, 'hydrogen.dat')) From 6919e7cc88cce0f025e72b2eb11f72180aa9618e Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sat, 11 Aug 2018 10:40:19 -1000 Subject: [PATCH 06/38] Restart interrupted scenarios with the same --job-id argument or SWITCH_JOB_ID environment variable. 
--- switch_model/solve_scenarios.py | 63 +++++++++++---------------------- 1 file changed, 20 insertions(+), 43 deletions(-) diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index 25fb17585..9cef9560b 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -53,50 +53,27 @@ requested_scenarios = scenario_manager_args.scenarios scenario_list_file = scenario_manager_args.scenario_list scenario_queue_dir = scenario_manager_args.scenario_queue -job_id = scenario_manager_args.job_id -# Get a unique job id. Note: in the past we have tried to get a -# persistent ID for each parallel task, so that it could requeue any -# jobs it was previously working on when it restarted. However, this -# is kludgy to begin with (only works if restarted under similar conditions) -# and tends to fail. It also cannot work with a bare "srun" or "mpirun" -# command, which might launch 20+ tasks that end up thinking they're the -# same job and race to reset the queue. -job_id = socket.gethostname() + '_' + str(os.getpid()) - -# # Make a best effort to get a unique, persistent job_id for each job. -# # This is used to clear the queue of running tasks if a task is stopped and -# # restarted. (would be better if other jobs could do this when this job dies -# # but it's hard to see how they can detect when this job fails.) -# # (The idea is that the user will run multiple jobs in parallel, with one -# # thread per job, to process all the scenarios. These might be run in separate -# # terminal windows, or in separate instances of gnu screen, or as numbered -# # jobs on an HPC system. Sometimes a job will get interrupted, e.g., if the -# # user presses ctrl-c in a terminal window or if the job is launched on an -# # interruptible queue. This script attempts to detect when that job gets -# # relaunched, and re-run the interrupted scenario.) 
-# if job_id is None: -# job_id = os.environ.get('JOB_ID') # could be set by user -# if job_id is None: -# job_id = os.environ.get('JOBID') # could be set by user -# if job_id is None: -# job_id = os.environ.get('SLURM_JOBID') -# if job_id is None: -# job_id = os.environ.get('OMPI_MCA_ess_base_jobid') -# if job_id is None: -# # construct one from hostname and parent's pid -# # this way, each job launched from a different terminal window -# # or different instance of gnu screen will have a persistent ID -# # (This won't work on Windows before Python 3.2; in that case, -# # users should specify a --job-id or set an environment variable -# # when running multiple jobs in parallel. Without that, all -# # jobs will think they have the same ID, and at startup they will -# # try to re-run the scenario currently being run by some other job.) -# if hasattr(os, 'getppid'): -# job_id = socket.gethostname() + '_' + str(os.getppid()) -# else: -# # won't be able to automatically clear previously interrupted job -# job_id = socket.gethostname() + '_' + str(os.getpid()) +# Get a unique task id. +# This is used to requeue any scenario that this task was working on that got +# interrupted. This is useful for running jobs on a pre-emptable cluster. +# Note: in the past we have tried to get a persistent ID for each parallel task +# by inspecting the cluster computing batch environment or looking at the parent's +# pid (useful when launching several instances of `switch solve-scenarios` in +# different terminals on a desktop). However, that only works if tasks are +# restarted under similar conditions. It also fails if users run one job on a +# cluster that launches several instances of solve-scenarios via a direct call to +# "srun" or "mpirun". That launches many tasks that all end up thinking they're +# the same task and race to reset the queue. So now it is up to the user to +# specify a unique task id in an environment variable or command-line argument. 
+# If a job id is not specified, interrupted jobs will not be restarted. +job_id = scenario_manager_args.job_id +if job_id is None: + job_id = os.environ.get('SWITCH_JOB_ID') +if job_id is None: + # this cannot be running in parallel with another task with the same pid on + # the same host, so it's safe to requeue any jobs with this id + job_id = socket.gethostname() + '_' + str(os.getpid()) running_scenarios_file = os.path.join(scenario_queue_dir, job_id+"_running.txt") From 7159ac884bb13619ff53ab948646845d3b624368 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 09:55:23 -1000 Subject: [PATCH 07/38] Merge repeated calls to --include-module(s), --exclude-module(s), --save-expression(s), --suffix(es) or --scenario(s) --- switch_model/reporting/__init__.py | 15 ++++++--- switch_model/solve.py | 36 +++++++++++++------- switch_model/solve_scenarios.py | 5 ++- switch_model/utilities.py | 54 ++++++++++++++++++++++++++++-- 4 files changed, 91 insertions(+), 19 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index fb431e4e8..3cd64a616 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -43,7 +43,8 @@ def define_arguments(argparser): dest='sorted_output', help='Write generic variable result values in sorted order') argparser.add_argument( - "--save-expressions", "--save-expression", dest="save_expressions", nargs='+', default=['none'], + "--save-expressions", "--save-expression", dest="save_expressions", nargs='+', + default=[], action='extend', help="List of expressions to save in addition to variables; can also be 'all' or 'none'." 
) @@ -119,9 +120,15 @@ def post_solve(instance, outdir): def save_generic_results(instance, outdir, sorted_output): components = list(instance.component_objects(Var)) # add Expression objects that should be saved, if any - if instance.options.save_expressions == ['none']: - pass - elif instance.options.save_expressions == ['all']: + if 'none' in instance.options.save_expressions: + # drop everything up till the last 'none' (users may have added more after that) + last_none = ( + len(instance.options.save_expressions) + - instance.options.save_expressions[::-1].index('none') + ) + instance.options.save_expressions = instance.options.save_expressions[last_none:] + + if 'all' in instance.options.save_expressions: components += list(instance.component_objects(Expression)) else: components += [getattr(instance, c) for c in instance.options.save_expressions] diff --git a/switch_model/solve.py b/switch_model/solve.py index 3804a73a2..1202ba784 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -354,7 +354,7 @@ def define_arguments(argparser): # note: pyomo has a --solver-suffix option but it is not clear # whether that does the same thing as --suffix defined here, # so we don't reuse the same name. 
- argparser.add_argument("--suffixes", "--suffix", nargs="+", default=['rc','dual','slack'], + argparser.add_argument("--suffixes", "--suffix", nargs="+", action='extend', default=['rc','dual','slack'], help="Extra suffixes to add to the model and exchange with the solver (e.g., iis, rc, dual, or slack)") # Define solver-related arguments @@ -414,12 +414,14 @@ def add_module_args(parser): help='Text file with a list of modules to include in the model (default is "modules.txt")' ) parser.add_argument( - "--include-modules", "--include-module", dest="include_modules", nargs='+', default=[], - help="Module(s) to add to the model in addition to any specified with --module-list" + "--include-modules", "--include-module", dest="include_exclude_modules", nargs='+', + action='include', default=[], + help="Module(s) to add to the model in addition to any specified with --module-list file" ) parser.add_argument( - "--exclude-modules", "--exclude-module", dest="exclude_modules", nargs='+', default=[], - help="Module(s) to remove from the model after processing --module-list and --include-modules" + "--exclude-modules", "--exclude-module", dest="include_exclude_modules", nargs='+', + action='exclude', default=[], + help="Module(s) to remove from the model after processing --module-list file and prior --include-modules arguments" ) # note: we define --inputs-dir here because it may be used to specify the location of # the module list, which is needed before it is loaded. 
@@ -481,14 +483,24 @@ def get_module_list(args): modules = [r.strip() for r in f.read().splitlines()] modules = [m for m in modules if m and not m.startswith("#")] - # add additional modules requested by the user - modules.extend(module_options.include_modules) - - # remove modules requested by the user - for module_name in module_options.exclude_modules: - modules.remove(module_name) + # adjust modules as requested by the user + # include_exclude_modules format: [('include', [mod1, mod2]), ('exclude', [mod3])] + for action, mods in module_options.include_exclude_modules: + if action == 'include': + for module_name in mods: + if module_name not in modules: # maybe we should raise an error if already present? + modules.append(module_name) + if action == 'exclude': + for module_name in mods: + try: + modules.remove(module_name) + except ValueError: + raise ValueError( # maybe we should just pass? + 'Unable to exclude module {} because it was not ' + 'previously included.'.format(module_name) + ) - # add the current module, since it has callbacks, e.g. define_arguments for iteration and suffixes + # add this module, since it has callbacks, e.g. define_arguments for iteration and suffixes modules.append("switch_model.solve") return modules diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index 9cef9560b..8b9b8dbee 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -37,7 +37,10 @@ parser = _ArgumentParser( allow_abbrev=False, description='Solve one or more Switch scenarios.' 
) -parser.add_argument('--scenario', '--scenarios', nargs='+', dest='scenarios', default=[]) +parser.add_argument( + '--scenario', '--scenarios', nargs='+', dest='scenarios', + default=[], action='extend' +) #parser.add_argument('--scenarios', nargs='+', default=[]) parser.add_argument("--scenario-list", default="scenarios.txt") parser.add_argument("--scenario-queue", default="scenario_queue") diff --git a/switch_model/utilities.py b/switch_model/utilities.py index 0d1add938..6ae43e38b 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -504,11 +504,11 @@ def load_aug(switch_data, optional=False, auto_select=False, # which will be consumed by one of their modules, the default parser would # match that to "--exclude-modules" during the early, partial parse.) if sys.version_info >= (3, 5): - _ArgumentParser = argparse.ArgumentParser + _ArgumentParserAllowAbbrev = argparse.ArgumentParser else: # patch ArgumentParser to accept the allow_abbrev flag # (works on Python 2.7 and maybe others) - class _ArgumentParser(argparse.ArgumentParser): + class _ArgumentParserAllowAbbrev(argparse.ArgumentParser): def __init__(self, *args, **kwargs): if not kwargs.get("allow_abbrev", True): if hasattr(self, "_get_option_tuples"): @@ -527,6 +527,56 @@ def new_get_option_tuples(self, option_string): kwargs.pop("allow_abbrev", None) return argparse.ArgumentParser.__init__(self, *args, **kwargs) +class ExtendAction(argparse.Action): + """Create or extend list with the provided items""" + # from https://stackoverflow.com/a/41153081/3830997 + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest) or [] + items.extend(values) + setattr(namespace, self.dest, items) + +class IncludeAction(argparse.Action): + """Flag the specified items for inclusion in the model""" + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest) or [] + items.append(('include', values)) + 
setattr(namespace, self.dest, items) +class ExcludeAction(argparse.Action): + """Flag the specified items for exclusion from the model""" + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest) or [] + items.append(('exclude', values)) + setattr(namespace, self.dest, items) + +# TODO: merge the _ArgumentParserAllowAbbrev code into this class +class _ArgumentParser(_ArgumentParserAllowAbbrev): + """ + Custom version of ArgumentParser: + - warns about a bug in standard Python ArgumentParser for --arg="some words" + - allows use of 'extend', 'include' and 'exclude' actions to accumulate lists + with multiple calls + """ + def __init__(self, *args, **kwargs): + super(_ArgumentParser, self).__init__(*args, **kwargs) + self.register('action', 'extend', ExtendAction) + self.register('action', 'include', IncludeAction) + self.register('action', 'exclude', ExcludeAction) + + def parse_known_args(self, args=None, namespace=None): + # parse_known_args parses arguments like --list-arg a b --other-arg="something with space" + # as list_arg=['a', 'b', '--other-arg="something with space"']. + # See https://bugs.python.org/issue34390. + # We issue a warning to avoid this. + if args is not None: + for a in args: + if a.startswith('--') and '=' in a: + print( + "Warning: argument '{}' may be parsed incorrectly. It is " + "safer to use ' ' instead of '=' as a separator." 
+ .format(a) + ) + return super(_ArgumentParser, self).parse_known_args(args, namespace) + def approx_equal(a, b, tolerance=0.01): return abs(a-b) <= (abs(a) + abs(b)) / 2.0 * tolerance From 30df74cf08fbed93d76b1385c9b17ca6ea08e870 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 09:58:59 -1000 Subject: [PATCH 08/38] Add --quiet and --no-stream-solver arguments to cancel --verbose and --stream-solver --- switch_model/solve.py | 11 +++++++---- switch_model/solve_scenarios.py | 10 +++++++++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 1202ba784..8a6a4e28b 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -374,6 +374,9 @@ def define_arguments(argparser): argparser.add_argument( "--stream-output", "--stream-solver", action='store_true', dest="tee", default=None, help="Display information from the solver about its progress (usually combined with a suitable --solver-options-string)") + argparser.add_argument( + "--no-stream-output", "--no-stream-solver", action='store_false', dest="tee", default=None, + help="Don't display information from the solver about its progress") argparser.add_argument( "--symbolic-solver-labels", action='store_true', default=None, help='Use symbol names derived from the model when interfacing with the solver. 
' @@ -395,11 +398,11 @@ def define_arguments(argparser): # General purpose arguments argparser.add_argument( - '--verbose', '-v', default=False, action='store_true', + '--verbose', '-v', dest='verbose', default=False, action='store_true', help='Show information about model preparation and solution') - # argparser.add_argument( - # '--quiet', '-q', dest='verbose', action='store_false', - # help="Don't show information about model preparation and solution (cancels --verbose setting)") + argparser.add_argument( + '--quiet', '-q', dest='verbose', action='store_false', + help="Don't show information about model preparation and solution (cancels --verbose setting)") argparser.add_argument( '--interact', default=False, action='store_true', help='Enter interactive shell after solving the instance to enable inspection of the solved model.') diff --git a/switch_model/solve_scenarios.py b/switch_model/solve_scenarios.py index 8b9b8dbee..394f20c37 100755 --- a/switch_model/solve_scenarios.py +++ b/switch_model/solve_scenarios.py @@ -190,10 +190,18 @@ def get_scenario_name(scenario_args): # use ad-hoc parsing to extract the scenario name from a scenario-definition string return parse_arg("--scenario-name", default=None, args=scenario_args) +def last_index(lst, val): + try: + return len(lst) - lst[::-1].index(val) - 1 + except ValueError: + return -1 + def is_verbose(scenario_args): # check options settings for --verbose flag + # we can't use parse_arg, because we need to process both --verbose and --quiet # note: this duplicates settings in switch_model.solve, so it may fall out of date - return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) + return last_index(scenario_args, '--verbose') >= last_index(scenario_args, '--quiet') + # return parse_arg("--verbose", action='store_true', default=False, args=scenario_args) def get_scenario_dict(): # note: we read the list from the disk each time so that we get a fresher version From 
57ecb68ba1fe06a5cda139784c4d1c6b6db2a7a8 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 15:43:57 -1000 Subject: [PATCH 09/38] Move save_results to core and post_solve on reload It is sometimes necessary to reload models to run new post_solve code, e.g., if new outputs are defined after time-consuming models have already been run. Previously, --reload-prior-solution just reloaded the model and dumped the user into an interactive prompt, with no easy way to run module code. This commit changes the behavior of --reload-prior-solution, so that it now acts like an exact alternative to re-running the model, i.e., if the user specifies --reload-prior-solution, we reload the solution, then run the normal post-solve code, and don't drop to an interactive prompt unless they also specify --interact. Since this behavior is now central to the use of the model rather than a quasi-reporting behavior, this commit also moves save_results() back from switch_model.reporting to switch_model.solve. This commit also adds a --no-save-solution flag to disable automatic solution- saving, which is helpful for models that will be solved repeatedly and don't need to be reloaded. 
--- switch_model/reporting/__init__.py | 18 --------- switch_model/solve.py | 63 +++++++++++++++++++++--------- 2 files changed, 45 insertions(+), 36 deletions(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 3cd64a616..3d6f78ccd 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -114,7 +114,6 @@ def post_solve(instance, outdir): save_generic_results(instance, outdir, instance.options.sorted_output) save_total_cost_value(instance, outdir) save_cost_components(instance, outdir) - save_results(instance, outdir) def save_generic_results(instance, outdir, sorted_output): @@ -211,20 +210,3 @@ def save_cost_components(m, outdir): values=lambda m, c: (c, cost_dict[c]), digits=16 ) - - -def save_results(instance, outdir): - """ - Save model solution for later reuse. - - Note that this pickles a solver results object because the instance itself - cannot be pickled -- see - https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution - """ - # First, save the full solution data to the results object, because recent - # versions of Pyomo only store execution metadata there by default. 
- instance.solutions.store_to(instance.last_results) - with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: - pickle.dump(instance.last_results, fh, protocol=-1) - # remove the solution from the results object, to minimize long-term memory use - instance.last_results.solution.clear() diff --git a/switch_model/solve.py b/switch_model/solve.py index 8a6a4e28b..1818c293a 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -111,7 +111,17 @@ def debug(type, value, tb): else: return instance + # make sure the outputs_dir exists (used by some modules during iterate) + # use a race-safe approach in case this code is run in parallel + try: + os.makedirs(instance.options.outputs_dir) + except OSError: + # directory probably exists already, but double-check + if not os.path.isdir(instance.options.outputs_dir): + raise + if instance.options.reload_prior_solution: + print('Loading prior solution...') reload_prior_solution_from_pickle(instance, instance.options.outputs_dir) if instance.options.verbose: print( @@ -119,15 +129,6 @@ def debug(type, value, tb): .format(timer.step_time()) ) else: - # make sure the outputs_dir exists (used by some modules during iterate) - # use a race-safe approach in case this code is run in parallel - try: - os.makedirs(instance.options.outputs_dir) - except OSError: - # directory probably exists already, but double-check - if not os.path.isdir(instance.options.outputs_dir): - raise - # solve the model (reports time for each step as it goes) if iterate_modules: if instance.options.verbose: @@ -142,19 +143,25 @@ def debug(type, value, tb): results.solver.termination_condition) print "" - if instance.options.verbose: - timer.step_time() # restart counter for next step + if instance.options.verbose: + timer.step_time() # restart counter for next step - # report/save results - if instance.options.verbose: - print "Executing post solve functions..." 
- instance.post_solve() - if instance.options.verbose: - print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) + if not instance.options.no_save_solution: + save_results(instance, instance.options.outputs_dir) + if instance.options.verbose: + print('Saved results in {:.2f} s.'.format(timer.step_time())) + + # report results + # (repeated if model is reloaded, to automatically run any new export code) + if instance.options.verbose: + print "Executing post solve functions..." + instance.post_solve() + if instance.options.verbose: + print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) # end of LogOutput block - if instance.options.interact or instance.options.reload_prior_solution: + if instance.options.interact: m = instance # present the solved model as 'm' for convenience banner = ( "\n" @@ -409,6 +416,9 @@ def define_arguments(argparser): argparser.add_argument( '--reload-prior-solution', default=False, action='store_true', help='Load outputs from a previously solved instance into variable values to allow interactive exploration of model components without having to solve the instance again.') + argparser.add_argument( + '--no-save-solution', default=False, action='store_true', + help="Don't save solution after model is solved.") def add_module_args(parser): @@ -700,6 +710,23 @@ def _options_string_to_dict(istr): ans[token[:index]] = val return ans +def save_results(instance, outdir): + """ + Save model solution for later reuse. + + Note that this pickles a solver results object because the instance itself + cannot be pickled -- see + https://stackoverflow.com/questions/39941520/pyomo-ipopt-does-not-return-solution + """ + # First, save the full solution data to the results object, because recent + # versions of Pyomo only store execution metadata there by default. 
+ instance.solutions.store_to(instance.last_results) + with open(os.path.join(outdir, 'results.pickle'), 'wb') as fh: + pickle.dump(instance.last_results, fh, protocol=-1) + # remove the solution from the results object, to minimize long-term memory use + instance.last_results.solution.clear() + + def query_yes_no(question, default="yes"): """Ask a yes/no question via raw_input() and return their answer. From 535106d408e8b6c7f47b3d4fed4fd25031fd2735 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 15:51:01 -1000 Subject: [PATCH 10/38] Test presence of Python argument parsing bug This gives an explicit test of whether the list-argument parsing bug still applies to the current version of Python, and disables the warning if not needed. This also documents the bug better so future developers can remove the warning when no longer needed. --- switch_model/utilities.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/switch_model/utilities.py b/switch_model/utilities.py index 6ae43e38b..9715b8718 100644 --- a/switch_model/utilities.py +++ b/switch_model/utilities.py @@ -5,11 +5,10 @@ Utility functions for SWITCH-pyomo. """ -import os, types, importlib, re, sys, argparse, time +import os, types, importlib, re, sys, argparse, time, datetime import __main__ as main from pyomo.environ import * import pyomo.opt -import datetime # Check whether this is an interactive session (determined by whether # __main__ has a __file__ attribute). Scripts can check this value to @@ -548,6 +547,17 @@ def __call__(self, parser, namespace, values, option_string=None): items.append(('exclude', values)) setattr(namespace, self.dest, items) +# Test whether we need to issue warnings about the Python parsing bug. 
+# (applies to at least Python 2.7.11 and 3.6.2) +# This bug messes up solve-scenarios if the user specifies +# --scenario x --solver-options-string="a=b c=d" +test_parser = argparse.ArgumentParser() +test_parser.add_argument('--arg1', nargs='+', default=[]) +bad_equal_parser = ( + len(test_parser.parse_known_args(['--arg1', 'a', '--arg2=a=1 b=2'])[1]) + == 0 +) + # TODO: merge the _ArgumentParserAllowAbbrev code into this class class _ArgumentParser(_ArgumentParserAllowAbbrev): """ @@ -567,7 +577,7 @@ def parse_known_args(self, args=None, namespace=None): # as list_arg=['a', 'b', '--other-arg="something with space"']. # See https://bugs.python.org/issue34390. # We issue a warning to avoid this. - if args is not None: + if bad_equal_parser and args is not None: for a in args: if a.startswith('--') and '=' in a: print( @@ -575,6 +585,7 @@ def parse_known_args(self, args=None, namespace=None): "safer to use ' ' instead of '=' as a separator." .format(a) ) + time.sleep(2) # give users a chance to see it return super(_ArgumentParser, self).parse_known_args(args, namespace) From 95794d716e3db80aaa8cd4025c626a8498c8f9d5 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 15:56:46 -1000 Subject: [PATCH 11/38] Patch Pyomo to accelerate reloading prior solution Pyomo 5.1.1 (and probably other versions) is very slow to load prior solutions because it does a full-component search for each component name as it assigns the data. This creates a delay that is quadratic in model size, so reloading solutions takes longer than solving the model from scratch. This code monkey-patches a few lines (!) of pyomo.core.base.PyomoModel.ModelSolutions.add_solution to use Pyomo's built-in caching system for component names. This makes --load-prior-solution fast enough to use for practical work. This is a pretty extreme approach, but there doesn't seem to be another way short of writing our own model-reload code. 
TODO: create a pull request for Pyomo to do this --- switch_model/solve.py | 74 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 4 deletions(-) diff --git a/switch_model/solve.py b/switch_model/solve.py index 1818c293a..a348c8185 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # Copyright (c) 2015-2017 The Switch Authors. All rights reserved. # Licensed under the Apache License, Version 2.0, which is in the LICENSE file. -import sys, os, time, shlex, re +import sys, os, time, shlex, re, inspect, textwrap, types import cPickle as pickle from pyomo.environ import * @@ -189,10 +189,11 @@ def patch_pyomo(): if not patched_pyomo: patched_pyomo = True # patch Pyomo if needed + + # Pyomo 4.2 and 4.3 mistakenly discard the original rule during + # Expression.construct. This makes it impossible to reconstruct + # expressions (e.g., for iterated models). So we patch it. if (4, 2) <= pyomo.version.version_info[:2] <= (4, 3): - # Pyomo 4.2 and 4.3 mistakenly discard the original rule during - # Expression.construct. This makes it impossible to reconstruct - # expressions (e.g., for iterated models). So we patch it. # test whether patch is needed: m = ConcreteModel() m.e = Expression(rule=lambda m: 0) @@ -207,6 +208,67 @@ def new_construct(self, *args, **kwargs): pyomo.environ.Expression.construct = new_construct del m + # Pyomo 5.1.1 (and maybe others) is very slow to load prior solutions because + # it does a full-component search for each component name as it assigns the + # data. This ends up taking longer than solving the model. So we micro- + # patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution to use + # Pyomo's built-in caching system for component names. 
+ # TODO: create a pull request for Pyomo to do this + # NOTE: space inside the long quotes is significant; must match the Pyomo code + old_code = """ + for obj in instance.component_data_objects(Var): + cache[obj.name] = obj + for obj in instance.component_data_objects(Objective, active=True): + cache[obj.name] = obj + for obj in instance.component_data_objects(Constraint, active=True): + cache[obj.name] = obj""" + new_code = """ + # use buffer to avoid full search of component for data object + # which introduces a delay that is quadratic in model size + buf=dict() + for obj in instance.component_data_objects(Var): + cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj + for obj in instance.component_data_objects(Objective, active=True): + cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj + for obj in instance.component_data_objects(Constraint, active=True): + cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj""" + + from pyomo.core.base.PyomoModel import ModelSolutions + add_solution_code = inspect.getsource(ModelSolutions.add_solution) + if old_code in add_solution_code: + # create and inject a new version of the method + add_solution_code = add_solution_code.replace(old_code, new_code) + replace_method(ModelSolutions, 'add_solution', add_solution_code) + else: + print( + "NOTE: The patch to pyomo.core.base.PyomoModel.ModelSolutions.add_solution " + "has been deactivated because the Pyomo source code has changed. " + "Check whether this patch is still needed and edit {} accordingly." + .format(__file__) + ) + +def replace_method(class_ref, method_name, new_source_code): + """ + Replace specified class method with a compiled version of new_source_code. 
+ """ + orig_method = getattr(class_ref, method_name) + # compile code into a function + workspace = dict() + exec(textwrap.dedent(new_source_code), workspace) + new_method = workspace[method_name] + # create a new function with the same body, but using the old method's namespace + new_func = types.FunctionType( + new_method.__code__, + orig_method.__globals__, + orig_method.__name__, + orig_method.__defaults__, + orig_method.__closure__ + ) + # note: this normal function will be automatically converted to an unbound + # method when it is assigned as an attribute of a class + setattr(class_ref, method_name, new_func) + + def reload_prior_solution_from_tabs(instance): """ Assign values to all model variables from .tab files saved after @@ -400,6 +462,10 @@ def define_arguments(argparser): # location of the module list (deprecated) # argparser.add_argument("--inputs-dir", default="inputs", # help='Directory containing input files (default is "inputs")') + argparser.add_argument( + "--input-alias", "--input-aliases", dest="input_aliases", nargs='+', default=[], + help='List of input file substitutions, in form of standard_file.tab=alternative_file.tab, ' + 'useful for sensitivity studies with different inputs.') argparser.add_argument("--outputs-dir", default="outputs", help='Directory to write output files (default is "outputs")') From 3039c8014f7015a552cc65195f949894de9cad0f Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 13 Aug 2018 16:02:36 -1000 Subject: [PATCH 12/38] Smooth ChargeEVs even when it is an Expression The hawaii.ev_advanced module constructs ChargeEVs as a weighted sum of several possible charging profiles. If this is squared and added to the smoothing objective, it makes the model non-positive-definite, so cplex won't solve it. This code instead smoothes a helper variable ChargeEVsVar that is bound to ChargeEVs. This commit also refactors the smoothing code to make it a little DRYer. 
--- switch_model/hawaii/smooth_dispatch.py | 101 ++++++++++++++----------- 1 file changed, 55 insertions(+), 46 deletions(-) diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index 224ecb294..de7eca24e 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -1,4 +1,4 @@ -"""Minimize excess renewable production (dissipated in transmission losses) and +"""Minimize excess renewable production (dissipated in transmission losses) and smooth out demand response and EV charging as much as possible.""" from pyomo.environ import * @@ -13,22 +13,38 @@ def define_components(m): if m.options.verbose: print "Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver) print "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." - + # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: + if hasattr(m, 'ChargeEVs') and isinstance(m.ChargeEVs, Expression): + # Create a variable bound to the ChargeEVs expression + # that can be squared in the objective function without creating + # a non-positive-definite problem. 
+ m.ChargeEVsVar = Var(m.ChargeEVs.index_set()) + m.ChargeEVsVar_fix = Constraint( + m.ChargeEVs.index_set(), + rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key] + ) + def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) obj = sum( getattr(m, component)[z, t] for z in m.LOAD_ZONES - for t in m.TIMEPOINTS + for t in m.TIMEPOINTS for component in m.Zone_Power_Injections) + # minimize the variability of various slack responses - adjustable_components = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW' + components_to_smooth = [ + 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', + 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', ] - for var in adjustable_components: + if hasattr(m, 'ChargeEVsVar'): + components_to_smooth.append('ChargeEVsVar') + else: + components_to_smooth.append('ChargeEVs') + + for var in components_to_smooth: if hasattr(m, var): if m.options.verbose: print "Will smooth {}.".format(var) @@ -39,7 +55,7 @@ def Smooth_Free_Variables_obj_rule(m): print "Will smooth charging and discharging of standard storage." obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) - # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables + # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module print "Will maximize provision of up reserves." 
@@ -69,30 +85,23 @@ def pre_iterate(m): # indicate that this was run in iterated mode, so no need for post-solve m.iterated_smooth_dispatch = True elif m.iteration_number == 1: - # save any dual values for later use (solving with a different objective - # will alter them in an undesirable way) - save_duals(m) - # switch to the smoothing objective - fix_obj_expression(m.Minimize_System_Cost) - m.Minimize_System_Cost.deactivate() - m.Smooth_Free_Variables.activate() - print "smoothing free variables..." + pre_smooth_solve(m) else: raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) return None # no comment on convergence - + def post_iterate(m): if hasattr(m, "ChargeBattery"): double_charge = [ ( - z, t, - m.ChargeBattery[z, t].value, + z, t, + m.ChargeBattery[z, t].value, m.DischargeBattery[z, t].value - ) - for z in m.LOAD_ZONES - for t in m.TIMEPOINTS - if m.ChargeBattery[z, t].value > 0 + ) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 and m.DischargeBattery[z, t].value > 0 ] if len(double_charge) > 0: @@ -104,20 +113,14 @@ def post_iterate(m): z=z, t=m.tp_timestamp[t], c=c, d=d ) - + if m.options.smooth_dispatch: # setup model for next iteration if m.iteration_number == 0: done = False # we'll have to run again to do the smoothing elif m.iteration_number == 1: # finished smoothing the model - # restore the standard objective - m.Smooth_Free_Variables.deactivate() - m.Minimize_System_Cost.activate() - # unfix the variables - fix_obj_expression(m.Minimize_System_Cost, False) - # restore any duals from the original solution - restore_duals(m) + post_smooth_solve(m) # now we're done done = True else: @@ -129,24 +132,32 @@ def post_iterate(m): return done def post_solve(m, outputs_dir): + """ Smooth dispatch if it wasn't already done during an iterative solution. 
""" if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): - - # store model state and prepare for smoothing - save_duals(m) - fix_obj_expression(m.Minimize_System_Cost) - m.Minimize_System_Cost.deactivate() - m.Smooth_Free_Variables.activate() - + pre_smooth_solve(m) # re-solve and load results - print "smoothing free variables..." m.preprocess() switch_model.solve.solve(m) + post_smooth_solve(m) + +def pre_smooth_solve(m): + """ store model state and prepare for smoothing """ + save_duals(m) + fix_obj_expression(m.Minimize_System_Cost) + m.Minimize_System_Cost.deactivate() + m.Smooth_Free_Variables.activate() + print "smoothing free variables..." + +def post_smooth_solve(m): + """ restore original model state """ + # restore the standard objective + m.Smooth_Free_Variables.deactivate() + m.Minimize_System_Cost.activate() + # unfix the variables + fix_obj_expression(m.Minimize_System_Cost, False) + # restore any duals from the original solution + restore_duals(m) - # restore original model state - m.Smooth_Free_Variables.deactivate() - m.Minimize_System_Cost.activate() - fix_obj_expression(m.Minimize_System_Cost, False) - restore_duals(m) def save_duals(m): if hasattr(m, 'dual'): @@ -182,5 +193,3 @@ def fix_obj_expression(e, status=True): 'Expression {e} does not have an exg, fixed or _args property, ' + 'so it cannot be fixed.'.format(e=e) ) - - From bee83f9303692c72d49620dac25e5a92f70f6f54 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 16 Aug 2018 09:48:52 -1000 Subject: [PATCH 13/38] Miscellaneous bug-fixes and tweaks - rename gen_cap.txt to gen_cap.tab and sort rows if requested - calculate spinning reserves from EVs correctly in the hawaii.ev_advanced module - turn off the must-run requirement in hawaii.kalaeloa when RPS or EV share is above 75% - drop support for nominal-dollar fuel price forecasts in hawaii.scenario_data - add --no-post-solve option - improve description of prior solution file --- 
switch_model/generators/core/build.py | 7 ++++--- switch_model/hawaii/ev_advanced.py | 2 +- switch_model/hawaii/kalaeloa.py | 18 +++++++++++++++--- switch_model/hawaii/scenario_data.py | 17 +++++++++-------- switch_model/solve.py | 24 +++++++++++++++--------- 5 files changed, 44 insertions(+), 24 deletions(-) diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py index 11cf0e093..b328a7eee 100644 --- a/switch_model/generators/core/build.py +++ b/switch_model/generators/core/build.py @@ -512,10 +512,11 @@ def load_inputs(mod, switch_data, inputs_dir): switch_data.load(filename=multi_fuels_path) -def post_solve(instance, outdir): +def post_solve(m, outdir): write_table( - instance, instance.GEN_PERIODS, - output_file=os.path.join(outdir, "gen_cap.txt"), + m, + sorted(m.GEN_PERIODS) if m.options.sorted_output else m.GEN_PERIODS, + output_file=os.path.join(outdir, "gen_cap.tab"), headings=( "GENERATION_PROJECT", "PERIOD", "gen_tech", "gen_load_zone", "gen_energy_source", diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py index 846885d84..3b22e8e80 100644 --- a/switch_model/hawaii/ev_advanced.py +++ b/switch_model/hawaii/ev_advanced.py @@ -206,7 +206,7 @@ def rule(m): for rt in m.EV_SPINNING_RESERVE_TYPES ) <= m.EVSlackUp[ba, tp] ) - m.Limit_EVSpinningReserveUp = Constraint( + m.Limit_EVSpinningReserveDown = Constraint( m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, ba, tp: sum( diff --git a/switch_model/hawaii/kalaeloa.py b/switch_model/hawaii/kalaeloa.py index 70f765fdb..adb0b4e07 100644 --- a/switch_model/hawaii/kalaeloa.py +++ b/switch_model/hawaii/kalaeloa.py @@ -5,7 +5,7 @@ def define_components(m): # force Kalaeloa_CC3 offline unless 1&2 are at max (per John Cole e-mail 9/28/16) - + # by inspection of figure 8 & 9 in the RPS Study, it appears that Kalaeloa has 3 modes: # commit unit 1, run between 65 and 90 MW # commit units 1 & 2, run each between 65 and 90 MW @@ -54,7 +54,7 @@ def 
define_components(m): m.KALAELOA_DUCT_BURNER_DISPATCH_POINTS, m.KALAELOA_MAIN_UNITS, rule=lambda m, g_duct, tp, g_main: m.DispatchGen[g_duct, tp] - <= + <= m.RunKalaeloaUnitFull[g_main, tp] * m.gen_capacity_limit_mw[g_duct] ) @@ -70,7 +70,19 @@ def Kalaeloa_Must_Run_rule(m, tp): except AttributeError: both_units_out = False - if both_units_out: + # in 2018, fossil fuel consumption was roughly 1M barrels for various + # taxable uses, 420k barrels for utility, and maybe 500k barrels for + # non-utility electricity production (Kalaeloa)? (It looks like jet + # kerosene was brought in directly.) There are two refineries that split + # the crude oil into LSFO, gasoline and other products. These are co-products, + # so it's probably not cost-effective to keep running any refinery with the + # same amount of steam if the demand for either product drops below 25% + # of the 2018 level. So we assume that Kalaeloa's must-run rule applies + # only until either consumption is below 25% of the starting level. + ev_share = m.ev_share['Oahu', m.tp_period[tp]] if hasattr(m, 'ev_share') else 0.0 + rps_level = m.rps_target_for_period[m.tp_period[tp]] if hasattr(m, 'rps_target_for_period') else 0.0 + + if both_units_out or ev_share >= 0.75 or rps_level >= 0.75: return Constraint.Skip else: return (sum(m.DispatchGen[g, tp] for g in m.KALAELOA_MAIN_UNITS) >= 75.0) diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 8188372a1..3fbd2c47c 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -266,10 +266,7 @@ def write_tables(**args): # TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal, # and base year, and possibly inflation rate. 
if args['fuel_scen_id'] in ('1', '2', '3'): - # no base_year specified; these are in nominal dollars - inflator = 'power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.year)' - else: - inflator = 'power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year)' + raise ValueError("fuel_scen_ids '1', '2' and '3' (specified in nominal dollars) are no longer supported.") if args.get("use_simple_fuel_costs", False): # simple fuel markets with no bulk LNG expansion option (use fuel_cost module) @@ -290,7 +287,11 @@ def write_tables(**args): write_table('fuel_cost.tab', with_period_length + """ SELECT load_zone, replace(fuel_type, ' ', '_') as fuel, p.period, - avg(price_mmbtu * {inflator} + COALESCE(fixed_cost, 0.00)) as fuel_cost + avg( + price_mmbtu + * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) + + COALESCE(fixed_cost, 0.00) + ) as fuel_cost FROM fuel_costs c, study_periods p JOIN period_length l USING (period) WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s @@ -299,7 +300,7 @@ def write_tables(**args): AND c.year >= p.period AND c.year < p.period + l.period_length GROUP BY 1, 2, 3 ORDER BY 1, 2, 3; - """.format(inflator=inflator, lng_selector=lng_selector), args) + """.format(lng_selector=lng_selector), args) else: # advanced fuel markets with LNG expansion options (used by forward-looking models) # (use fuel_markets module) @@ -317,7 +318,7 @@ def write_tables(**args): replace(fuel_type, ' ', '_') as fuel, tier, p.period, - avg(price_mmbtu * {inflator}) as unit_cost, + avg(price_mmbtu * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year)) as unit_cost, avg(max_avail_at_cost) as max_avail_at_cost, avg(fixed_cost) as fixed_cost, avg(max_age) as max_age @@ -328,7 +329,7 @@ def write_tables(**args): AND (c.year >= p.period AND c.year < p.period + l.period_length) GROUP BY 1, 2, 3, 4 ORDER BY 1, 2, 3, 4; - """.format(inflator=inflator), args) + """, args) write_table('zone_to_regional_fuel_market.tab', 
""" SELECT DISTINCT load_zone, concat('Hawaii_', replace(fuel_type, ' ', '_')) AS regional_fuel_market diff --git a/switch_model/solve.py b/switch_model/solve.py index a348c8185..aa59b4b79 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -79,8 +79,10 @@ def debug(type, value, tb): return model if model.options.reload_prior_solution: + # TODO: allow a directory to be specified after --reload-prior-solution, + # otherwise use outputs_dir. if not os.path.isdir(model.options.outputs_dir): - raise IOError("Specified outputs directory for solution exploration does not exist.") + raise IOError("Directory specified for prior solution does not exist.") # get a list of modules to iterate through iterate_modules = get_iteration_list(model) @@ -153,11 +155,12 @@ def debug(type, value, tb): # report results # (repeated if model is reloaded, to automatically run any new export code) - if instance.options.verbose: - print "Executing post solve functions..." - instance.post_solve() - if instance.options.verbose: - print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) + if not instance.options.no_post_solve: + if instance.options.verbose: + print "Executing post solve functions..." 
+ instance.post_solve() + if instance.options.verbose: + print "Post solve processing completed in {:.2f} s.".format(timer.step_time()) # end of LogOutput block @@ -477,14 +480,17 @@ def define_arguments(argparser): '--quiet', '-q', dest='verbose', action='store_false', help="Don't show information about model preparation and solution (cancels --verbose setting)") argparser.add_argument( - '--interact', default=False, action='store_true', - help='Enter interactive shell after solving the instance to enable inspection of the solved model.') + '--no-post-solve', default=False, action='store_true', + help="Don't run post-solve code on the completed model (i.e., reporting functions).") argparser.add_argument( '--reload-prior-solution', default=False, action='store_true', - help='Load outputs from a previously solved instance into variable values to allow interactive exploration of model components without having to solve the instance again.') + help='Load a previously saved solution; useful for re-running post-solve code or interactively exploring the model (via --interact).') argparser.add_argument( '--no-save-solution', default=False, action='store_true', help="Don't save solution after model is solved.") + argparser.add_argument( + '--interact', default=False, action='store_true', + help='Enter interactive shell after solving the instance to enable inspection of the solved model.') def add_module_args(parser): From c5b692f0ac7370cf63177763b23b35c3c2711626 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 26 Aug 2018 14:07:22 -1000 Subject: [PATCH 14/38] Move indexed sets from hawaii.switch_patch to main codebase --- .../demand_response/iterative/__init__.py | 6 +- switch_model/generators/core/build.py | 19 ++ switch_model/hawaii/batteries.py | 2 +- .../hawaii/batteries_fixed_calendar_life.py | 4 +- .../hawaii/demand_response_no_reserves.py | 6 +- switch_model/hawaii/emission_rules.py | 2 +- switch_model/hawaii/hydrogen.py | 8 +- 
switch_model/hawaii/lng_conversion.py | 2 +- switch_model/hawaii/psip_2016_04.py | 2 +- switch_model/hawaii/psip_2016_12.py | 2 +- switch_model/hawaii/pumped_hydro.py | 2 +- switch_model/hawaii/rps.py | 2 +- switch_model/hawaii/save_results.py | 12 +- switch_model/hawaii/switch_patch.py | 186 +++++++++++++++--- switch_model/timescales.py | 20 +- 15 files changed, 221 insertions(+), 54 deletions(-) diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index 1ffcf8fea..bd23b1ace 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -1071,17 +1071,17 @@ def write_results(m): values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( - sum(DispatchGenByFuel(m, p, t, f) for p in m.GENERATION_PROJECTS_BY_FUEL[f]) + sum(DispatchGenByFuel(m, p, t, f) for p in m.GENS_BY_FUEL[f]) for f in m.FUELS ) +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]) + sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple( sum( get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) for s in m.NON_FUEL_ENERGY_SOURCES ) diff --git a/switch_model/generators/core/build.py b/switch_model/generators/core/build.py index b328a7eee..8dd3209ec 100644 --- a/switch_model/generators/core/build.py +++ b/switch_model/generators/core/build.py @@ -219,6 +219,13 @@ def define_components(mod): mod.BASELOAD_GENS = Set( initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_is_baseload[g]) + # TODO: use a construction dictionary or closure to create all the GENS_BY_... 
+ # indexed sets more efficiently + mod.GENS_BY_TECHNOLOGY = Set( + mod.GENERATION_TECHNOLOGIES, + initialize=lambda m, t: + [g for g in m.GENERATION_PROJECTS if m.gen_tech[g] == t] + ) mod.CAPACITY_LIMITED_GENS = Set(within=mod.GENERATION_PROJECTS) mod.gen_capacity_limit_mw = Param( @@ -243,6 +250,7 @@ def define_components(mod): mod.FUEL_BASED_GENS = Set( initialize=mod.GENERATION_PROJECTS, filter=lambda m, g: m.gen_uses_fuel[g]) + mod.gen_full_load_heat_rate = Param( mod.FUEL_BASED_GENS, within=NonNegativeReals) @@ -256,6 +264,17 @@ def define_components(mod): if g in m.MULTIFUEL_GENS else [m.gen_energy_source[g]])) + mod.GENS_BY_NON_FUEL_ENERGY_SOURCE = Set( + mod.NON_FUEL_ENERGY_SOURCES, + initialize=lambda m, s: + [g for g in m.NON_FUEL_BASED_GENS if m.gen_energy_source[g] == s] + ) + mod.GENS_BY_FUEL = Set( + mod.FUELS, + initialize=lambda m, f: + [g for g in m.FUEL_BASED_GENS if f in m.FUELS_FOR_GEN[g]] + ) + mod.PREDETERMINED_GEN_BLD_YRS = Set( dimen=2) mod.GEN_BLD_YRS = Set( diff --git a/switch_model/hawaii/batteries.py b/switch_model/hawaii/batteries.py index 40306ea05..8f1d9848f 100644 --- a/switch_model/hawaii/batteries.py +++ b/switch_model/hawaii/batteries.py @@ -46,7 +46,7 @@ def define_components(m): # TODO: integrate this with other project data, so it can contribute to reserves, etc. 
m.BuildBattery = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS[p]) + sum(m.BuildBattery[z, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p]) ) # rate of charging/discharging battery diff --git a/switch_model/hawaii/batteries_fixed_calendar_life.py b/switch_model/hawaii/batteries_fixed_calendar_life.py index e5ddc50a8..f3387684b 100644 --- a/switch_model/hawaii/batteries_fixed_calendar_life.py +++ b/switch_model/hawaii/batteries_fixed_calendar_life.py @@ -29,7 +29,7 @@ def define_components(m): m.Battery_Capacity = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: sum( m.BuildBattery[z, bld_yr] - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS[p] if bld_yr + m.battery_n_years > p + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p ) ) @@ -53,7 +53,7 @@ def define_components(m): m.BuildBattery[z, bld_yr] * m.battery_capital_cost_per_mwh_capacity_by_year[bld_yr] * crf(m.interest_rate, m.battery_n_years) - for bld_yr in m.CURRENT_AND_PRIOR_PERIODS[p] if bld_yr + m.battery_n_years > p + for bld_yr in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p] if bld_yr + m.battery_n_years > p for z in m.LOAD_ZONES ) ) diff --git a/switch_model/hawaii/demand_response_no_reserves.py b/switch_model/hawaii/demand_response_no_reserves.py index 877b953f1..0120c8d23 100644 --- a/switch_model/hawaii/demand_response_no_reserves.py +++ b/switch_model/hawaii/demand_response_no_reserves.py @@ -741,17 +741,17 @@ def write_results(m): values=lambda m, z, t: (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( - sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENERATION_PROJECTS_BY_FUEL[f]) + sum(get(m.DispatchGenByFuel, (p, t, f), 0.0) for p in m.GENS_BY_FUEL[f]) for f in m.FUELS ) +tuple( - sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s]) + 
sum(get(m.DispatchGen, (p, t), 0.0) for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s]) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple( sum( get(m.DispatchUpperLimit, (p, t), 0.0) - get(m.DispatchGen, (p, t), 0.0) - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] ) for s in m.NON_FUEL_ENERGY_SOURCES ) diff --git a/switch_model/hawaii/emission_rules.py b/switch_model/hawaii/emission_rules.py index 8246f7829..7340ec9ac 100644 --- a/switch_model/hawaii/emission_rules.py +++ b/switch_model/hawaii/emission_rules.py @@ -11,7 +11,7 @@ def define_components(m): m.BANNED_FUEL_DISPATCH_POINTS = Set(dimen=3, initialize=lambda m: [(g, tp, f) for (f, y) in m.FUEL_BANS - for g in m.GENERATION_PROJECTS_BY_FUEL[f] # if not m.gen_is_cogen[g] + for g in m.GENS_BY_FUEL[f] # if not m.gen_is_cogen[g] for pe in m.PERIODS if m.period_end[pe] >= y for tp in m.TPS_IN_PERIOD[pe] if (g, tp) in m.GEN_TPS ] diff --git a/switch_model/hawaii/hydrogen.py b/switch_model/hawaii/hydrogen.py index 998f028f0..c1efd158e 100644 --- a/switch_model/hawaii/hydrogen.py +++ b/switch_model/hawaii/hydrogen.py @@ -26,7 +26,7 @@ def define_hydrogen_components(m): m.hydrogen_electrolyzer_life_years = Param() m.BuildElectrolyzerMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) m.ElectrolyzerCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildElectrolyzerMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) + sum(m.BuildElectrolyzerMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) m.RunElectrolyzerMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.ProduceHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.RunElectrolyzerMW[z, t] * m.hydrogen_electrolyzer_kg_per_mwh) @@ -44,7 +44,7 @@ def define_hydrogen_components(m): m.hydrogen_liquifier_life_years = Param() m.BuildLiquifierKgPerHour = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # capacity to build, measured in 
kg/hour of throughput m.LiquifierCapacityKgPerHour = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) + sum(m.BuildLiquifierKgPerHour[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) m.LiquifyHydrogenKgPerHour = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.LiquifyHydrogenMW = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.LiquifyHydrogenKgPerHour[z, t] * m.hydrogen_liquifier_mwh_per_kg @@ -56,7 +56,7 @@ def define_hydrogen_components(m): m.liquid_hydrogen_tank_life_years = Param() m.BuildLiquidHydrogenTankKg = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) # in kg m.LiquidHydrogenTankCapacityKg = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildLiquidHydrogenTankKg[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) + sum(m.BuildLiquidHydrogenTankKg[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) m.StoreLiquidHydrogenKg = Expression(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, z, ts: m.ts_duration_of_tp[ts] * sum(m.LiquifyHydrogenKgPerHour[z, tp] for tp in m.TPS_IN_TS[ts]) ) @@ -71,7 +71,7 @@ def define_hydrogen_components(m): m.hydrogen_fuel_cell_life_years = Param() m.BuildFuelCellMW = Var(m.LOAD_ZONES, m.PERIODS, within=NonNegativeReals) m.FuelCellCapacityMW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, p: - sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS[p])) + sum(m.BuildFuelCellMW[z, p_] for p_ in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[p])) m.DispatchFuelCellMW = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals) m.ConsumeHydrogenKgPerHour = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: m.DispatchFuelCellMW[z, t] / m.hydrogen_fuel_cell_mwh_per_kg diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py index c5db0c581..15a2f0fa0 100644 --- a/switch_model/hawaii/lng_conversion.py +++ 
b/switch_model/hawaii/lng_conversion.py @@ -105,7 +105,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # list of all projects and timepoints when LNG could potentially be used m.LNG_GEN_TIMEPOINTS = Set(dimen=2, initialize = lambda m: - ((p, t) for p in m.GENERATION_PROJECTS_BY_FUEL['LNG'] for t in m.TIMEPOINTS + ((p, t) for p in m.GENS_BY_FUEL['LNG'] for t in m.TIMEPOINTS if (p, t) in m.GEN_TPS) ) diff --git a/switch_model/hawaii/psip_2016_04.py b/switch_model/hawaii/psip_2016_04.py index c8fcb7387..4f6507ebb 100644 --- a/switch_model/hawaii/psip_2016_04.py +++ b/switch_model/hawaii/psip_2016_04.py @@ -221,7 +221,7 @@ def adjust_psip_credit(g, target): # sum( # m.DispatchGenByFuel[g, tp, f] * m.tp_weight_in_year[tp] * 0.001 # convert from MWh to GWh # for f in ['Diesel', 'LSFO', 'LSFO-Diesel-Blend'] - # for g in m.GENERATION_PROJECTS_BY_FUEL[f] + # for g in m.GENS_BY_FUEL[f] # for tp in m.TPS_IN_PERIOD[per] if (g, tp) in m.GEN_TPS # ) # ) diff --git a/switch_model/hawaii/psip_2016_12.py b/switch_model/hawaii/psip_2016_12.py index 26d7354ed..06fe7115a 100644 --- a/switch_model/hawaii/psip_2016_12.py +++ b/switch_model/hawaii/psip_2016_12.py @@ -381,7 +381,7 @@ def Enforce_Technology_Target_rule(m, per, tech): # sum( # m.DispatchGenByFuel[g, tp, f] * m.tp_weight_in_year[tp] * 0.001 # convert from MWh to GWh # for f in ['Diesel', 'LSFO', 'LSFO-Diesel-Blend'] - # for g in m.GENERATION_PROJECTS_BY_FUEL[f] + # for g in m.GENS_BY_FUEL[f] # for tp in m.TPS_IN_PERIOD[per] if (g, tp) in m.GEN_TPS # ) # ) diff --git a/switch_model/hawaii/pumped_hydro.py b/switch_model/hawaii/pumped_hydro.py index 4b38bbd59..d9dfad985 100644 --- a/switch_model/hawaii/pumped_hydro.py +++ b/switch_model/hawaii/pumped_hydro.py @@ -39,7 +39,7 @@ def define_components(m): # How much pumped hydro to build m.BuildPumpedHydroMW = Var(m.PH_GENS, m.PERIODS, within=NonNegativeReals) m.Pumped_Hydro_Proj_Capacity_MW = Expression(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: - sum(m.BuildPumpedHydroMW[g, 
pp] for pp in m.CURRENT_AND_PRIOR_PERIODS[pe]) + sum(m.BuildPumpedHydroMW[g, pp] for pp in m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD[pe]) ) # flag indicating whether any capacity is added to each project each year diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 7aa54861e..277f93839 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -108,7 +108,7 @@ def rps_target_for_period_rule(m, p): sum( m.DispatchGen[g, tp] * m.tp_weight[tp] for f in m.NON_FUEL_ENERGY_SOURCES if f in m.RPS_ENERGY_SOURCES - for g in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[f] + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[f] for tp in m.TPS_FOR_GEN_IN_PERIOD[g, per] ) ) diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 60645be82..ecfc55afa 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -207,16 +207,16 @@ def write_results(m, outputs_dir): ) +tuple( sum( - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] if m.gen_load_zone[p] == z ) for s in m.NON_FUEL_ENERGY_SOURCES ) +tuple( sum( - util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) - for p in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] if m.gen_load_zone[p] == z ) for s in m.NON_FUEL_ENERGY_SOURCES @@ -258,7 +258,7 @@ def write_results(m, outputs_dir): ) # prorated by energy source used * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] - for p in m.GENERATION_PROJECTS_BY_FUEL[f] + for p in m.GENS_BY_FUEL[f] if (p, t) in m.GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba ) ) @@ -267,7 +267,7 @@ def write_results(m, outputs_dir): +tuple( sum( m.CommitGenSpinningReservesUp[rt, p, t] - for p 
in m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE[s] + for p in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] if (p, t) in m.SPINNING_RESERVE_CAPABLE_GEN_TPS and m.zone_balancing_area[m.gen_load_zone[p]] == ba for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] ) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 783e2bb94..1ec784c5a 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -21,6 +21,156 @@ def new_create_command_line(*args, **kwargs): old_create_command_line = CPLEXSHELL.create_command_line CPLEXSHELL.create_command_line = new_create_command_line +# Code below is in switch_model.solve now +# # micro-patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution +# # to use a cache for component names; otherwise reloading a solution +# # takes longer than solving the model from scratch. +# # TODO: create a pull request for Pyomo to do this +# import inspect, textwrap, types +# +# def replace_method(class_ref, method_name, new_source_code): +# """ +# Replace specified class method with a compiled version of new_source_code. 
+# """ +# orig_method = getattr(class_ref, method_name) +# # compile code into a function +# workspace = dict() +# exec(textwrap.dedent(new_source_code), workspace) +# new_method = workspace[method_name] +# # create a new function with the same body, but using the old method's namespace +# new_func = types.FunctionType( +# new_method.__code__, +# orig_method.__globals__, +# orig_method.__name__, +# orig_method.__defaults__, +# orig_method.__closure__ +# ) +# # note: this normal function will be automatically converted to an unbound +# # method when it is assigned as an attribute of a class +# setattr(class_ref, method_name, new_func) +# +# old_code = """ +# for obj in instance.component_data_objects(Var): +# cache[obj.name] = obj +# for obj in instance.component_data_objects(Objective, active=True): +# cache[obj.name] = obj +# for obj in instance.component_data_objects(Constraint, active=True): +# cache[obj.name] = obj +# """ +# new_code = """ +# # use buffer to avoid full search of component for data object +# # which introduces a delay that is quadratic in model size +# buf=dict() +# for obj in instance.component_data_objects(Var): +# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj +# for obj in instance.component_data_objects(Objective, active=True): +# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj +# for obj in instance.component_data_objects(Constraint, active=True): +# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj +# """ +# +# from pyomo.core.base.PyomoModel import ModelSolutions +# add_solution_code = inspect.getsource(ModelSolutions.add_solution) +# if old_code in add_solution_code: +# # create and inject a new version of the method code +# add_solution_code = add_solution_code.replace(old_code, new_code) +# replace_method(ModelSolutions, 'add_solution', add_solution_code) +# else: +# print( +# "NOTE: The patch to pyomo.core.base.PyomoModel.ModelSolutions.add_solution " +# "has been deactivated because the 
Pyomo source code has changed. " +# "Check whether this patch is still needed and edit {} accordingly." +# .format(__file__) +# ) +# +# x = 7 +# def prx(arg=81): +# print x, arg +# prx() +# # both of these fail, readonly attribute +# # prx.__globals__ = {'prx': prx, 'x': 99} +# # prx.func_globals = {'prx': prx, 'x': 99} +# +# f = prx +# prx2 = types.FunctionType(f.__code__, {'prx': prx, 'x': 99}, f.__name__, f.__defaults__, f.__closure__) +# prx2(22) +# +# f = ModelSolutions.add_solution +# new_f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) +# +# type(prx) +# +# def func(test='no argument'): +# print test +# func(3) +# +# func.func_globals +# +# new_func = types.FunctionType( +# func.func_code, +# func.func_globals, +# 'new_func' +# ) +# new_func() +# +# from pyomo.environ import * +# ms = ModelSolutions(AbstractModel()) +# ms.add_solution(1, 2, 3, 4, 5, 6, 7) +# +# from pyomo.environ import * +# from pyomo.core.base.PyomoModel import ModelSolutions +# new_code = """ +# def add_symbol_map(self, symbol_map=None): +# print logging +# """ +# replace_method(ModelSolutions, 'add_symbol_map', new_code) +# ms = ModelSolutions(AbstractModel()) +# ms.add_symbol_map() +# +# ms.add_solution.func_code.co_names +# +# new_bytecode = types.CodeType( +# fc.co_argcount, +# fc.co_nlocals, +# fc.co_stacksize, +# fc.co_flags, +# new_method.func_code.co_code, +# fc.co_consts, +# fc.co_names, +# fc.co_varnames, +# fc.co_filename, +# fc.co_name, +# fc.co_firstlineno, +# fc.co_lnotab +# ) +# +# +# ModelSolutions.add_symbol_map +# +# import types +# class C(object): +# def __init__(self): +# self.val = 3 +# +# def get_val(self): +# return self.val +# +# C.get_val = types.MethodType(get_val, None, C) +# C.get_val +# C.__init__ +# o = C() +# o.get_val() +# +# o.get_val = types.MethodType(get_val, o) +# o.get_val() + + +# (class_ref, name, new_source_code) = (ModelSolutions, 'add_solution', add_solution_code) +# +# space = dict() +# exec("class 
Dummy(object):\n" + add_solution_code, space) +# type(space['Dummy'].add_solution) + # # TODO: combine the following changes into a pull request for Pyomo # # patch Pyomo's table-reading function to allow .tab files with headers but no data # import os, re @@ -74,26 +224,16 @@ def new_create_command_line(*args, **kwargs): # print "Switch will not be able to read empty data files." -def define_components(m): - """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" - - # define an indexed set of all periods before or including the current one. - # this is useful for calculations that must index over previous and current periods - # e.g., amount of capacity of some resource that has been built - m.CURRENT_AND_PRIOR_PERIODS = Set(m.PERIODS, ordered=True, initialize=lambda m, p: - # note: this is a fast way to refer to all previous periods, which also respects - # the built-in ordering of the set, but you have to be careful because - # (a) pyomo sets are indexed from 1, not 0, and - # (b) python's range() function is not inclusive on the top end. - [m.PERIODS[i] for i in range(1, m.PERIODS.ord(p)+1)] - ) - - # create lists of projects by energy source - # we sort these to help with display, but that may not actually have any effect - m.GENERATION_PROJECTS_BY_FUEL = Set(m.FUELS, initialize=lambda m, f: - sorted([p for p in m.FUEL_BASED_GENS if f in m.FUELS_FOR_GEN[p]]) - ) - m.GENERATION_PROJECTS_BY_NON_FUEL_ENERGY_SOURCE = Set(m.NON_FUEL_ENERGY_SOURCES, initialize=lambda m, s: - sorted([p for p in m.NON_FUEL_BASED_GENS if m.gen_energy_source[p] == s]) - ) - +# def define_components(m): +# """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" +# +# # define an indexed set of all periods before or including the current one. 
+# # this is useful for calculations that must index over previous and current periods +# # e.g., amount of capacity of some resource that has been built +# m.CURRENT_AND_PRIOR_PERIODS = Set(m.PERIODS, ordered=True, initialize=lambda m, p: +# # note: this is a fast way to refer to all previous periods, which also respects +# # the built-in ordering of the set, but you have to be careful because +# # (a) pyomo sets are indexed from 1, not 0, and +# # (b) python's range() function is not inclusive on the top end. +# [m.PERIODS[i] for i in range(1, m.PERIODS.ord(p)+1)] +# ) diff --git a/switch_model/timescales.py b/switch_model/timescales.py index 85396aab4..d0af25db0 100644 --- a/switch_model/timescales.py +++ b/switch_model/timescales.py @@ -238,7 +238,7 @@ def define_components(mod): # Derived sets and parameters # note: the first four are calculated early so they # can be used for the add_one_to_period_end_rule - + mod.tp_weight = Param( mod.TIMEPOINTS, within=PositiveReals, @@ -267,7 +267,7 @@ def define_components(mod): within=mod.TIMEPOINTS, initialize=lambda m, p: [ t for t in m.TIMEPOINTS if m.tp_period[t] == p]) - + # Decide whether period_end values have been given as exact points in time # (e.g., 2020.0 means 2020-01-01 00:00:00), or as a label for a full # year (e.g., 2020 means 2020-12-31 12:59:59). We use whichever one gives @@ -312,9 +312,9 @@ def add_one_to_period_end_rule(m): mod.TIMEPOINTS, initialize=lambda m, t: m.ts_duration_of_tp[m.tp_ts[t]]) # Identify previous step for each timepoint, for use in tracking - # unit commitment or storage. We use circular indexing (.prevw() method) - # for the timepoints within a timeseries to give consistency between the - # start and end state. (Note: separate timeseries are assumed to be + # unit commitment or storage. We use circular indexing (.prevw() method) + # for the timepoints within a timeseries to give consistency between the + # start and end state. 
(Note: separate timeseries are assumed to be # disconnected from each other.) mod.tp_previous = Param( mod.TIMEPOINTS, @@ -352,9 +352,17 @@ def validate_period_lengths_rule(m, p): return False return True mod.validate_period_lengths = BuildCheck( - mod.PERIODS, + mod.PERIODS, rule=validate_period_lengths_rule) + # define an indexed set of all periods before or including the current one. + # this is useful for calculations that must index over previous and current periods + mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( + mod.PERIODS, ordered=True, + initialize=lambda m, p: + [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] + ) + def load_inputs(mod, switch_data, inputs_dir): """ From 9d8a903ca0f8915ec224634cd13005bc8b654b88 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 26 Aug 2018 14:08:32 -1000 Subject: [PATCH 15/38] Disaggregate hourly dispatch by non-fuel technologies in hawaii.save_results --- switch_model/hawaii/save_results.py | 104 ++++++++++++++++------------ 1 file changed, 58 insertions(+), 46 deletions(-) diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index ecfc55afa..af1fc2ace 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -5,10 +5,10 @@ """ # TODO: eventually make this code more generic, e.g., create a general reporting module -# with functions like +# with functions like # add_report(report_name, indexes, reporting_times=["post_iterate", "post_solve"], restart_times=["scenario batch", "scenario", "iteration"]) # add_columns(report_name, headers, value_rule) -# Then re-create the file with the right headers at each restart time +# Then re-create the file with the right headers at each restart time # (with a name reflecting the degree of generality for restart and reporting) # Then add the rows of data at each reporting time. # The reporting module could also define some or all of the reports below as standard reports. 
@@ -42,11 +42,11 @@ def summary_headers(m): +((("biofuel_share_all_years",) + tuple('biofuel_share_'+str(p) for p in m.PERIODS)) if hasattr(m, 'RPSEligiblePower') else tuple()) ) - + def summary_values(m): demand_components = [c for c in ('zone_demand_mw', 'ShiftDemand', 'ChargeEVs') if hasattr(m, c)] values = [] - + # Cache SystemCostPerPeriod and SystemCost to speed up saving large models # The time needed to directly access the expressions seems to rise quadratically # with the number of timepoints, so it gets very slow for big models and we don't @@ -56,17 +56,17 @@ def summary_values(m): for p in m.PERIODS: SystemCostPerPeriod[p] = value(m.SystemCostPerPeriod[p]) SystemCost = sum(SystemCostPerPeriod[p] for p in m.PERIODS) - + # scenario name and looping variables values.extend([ str(m.options.scenario_name), m.demand_response_max_share if hasattr(m, 'demand_response_max_share') else 0.0, ]) - + # total cost (all periods) values.append(SystemCost) # m.SystemCost) - - # NPV of total cost / NPV of kWh generated (equivalent to spreading + + # NPV of total cost / NPV of kWh generated (equivalent to spreading # all costs uniformly over all generation) values.append( SystemCost # m.SystemCost @@ -76,8 +76,8 @@ def summary_values(m): for t in m.TIMEPOINTS ) ) - - # total cost / kWh generated in each period + + # total cost / kWh generated in each period # (both discounted to today, so the discounting cancels out) values.extend([ SystemCostPerPeriod[p] # m.SystemCostPerPeriod[p] @@ -121,7 +121,7 @@ def annualize_present_value_period_cost(m, period, val): return val / discount_factor def DispatchGenByFuel(m, g, tp, fuel): - """This is a replacement for mod.DispatchGenByFuel, which is only defined in + """This is a replacement for mod.DispatchGenByFuel, which is only defined in project.no_commit, not project.unitcommit.fuel_use. 
In the unit commitment version it can only be defined as a quadratically constrained variable, which we don't want to force on all users.""" @@ -136,7 +136,7 @@ def DispatchGenByFuel(m, g, tp, fuel): result = 0.0 elif total_fuel == 0.0: # power produced, but no fuel used (e.g., steam generator on combined-cycle plant). - # allocate evenly between fuels that could be used (should really be allocated the + # allocate evenly between fuels that could be used (should really be allocated the # same as the upstream generator, e.g., CT in combined-cycle plant, but we don't # know that allocation here). result = dispatch / len(m.FUELS_FOR_GEN[g]) @@ -144,16 +144,16 @@ def DispatchGenByFuel(m, g, tp, fuel): # allocate power production proportional to amount of each fuel used result = value(m.GenFuelUseRate[g, tp, fuel]) * dispatch / total_fuel return result - + def write_results(m, outputs_dir): tag = "_" + m.options.scenario_name if m.options.scenario_name else "" - - util.write_table(m, - output_file=os.path.join(outputs_dir, "summary{t}.tsv".format(t=tag)), + + util.write_table(m, + output_file=os.path.join(outputs_dir, "summary{t}.tsv".format(t=tag)), headings=summary_headers(m), values=lambda m: summary_values(m) ) - + if hasattr(m, 'Spinning_Reserve_Up_Requirements'): # pre-calculate amount of reserves provided and needed for each balancing area and timepoint spinning_reserve_provisions = defaultdict(float) @@ -172,7 +172,7 @@ def write_results(m, outputs_dir): for component in m.Spinning_Reserve_Up_Requirements: for (ba, tp), val in getattr(m, component).items(): spinning_reserve_requirements[ba, tp] += val - + # # write out results # util.write_table(m, m.TIMEPOINTS, # output_file=os.path.join(outputs_dir, "dispatch{t}.tsv".format(t=tag)), @@ -182,25 +182,29 @@ def write_results(m, outputs_dir): # for p in m.GENERATION_PROJECTS # ) # ) + + # get a list of non-fuel technologies, to allow disaggregation by type + non_fuel_techs = tuple(sorted(set(m.gen_tech[g] for g in 
m.NON_FUEL_BASED_GENS))) avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) util.write_table( m, m.LOAD_ZONES, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "energy_sources{t}.tsv".format(t=tag)), headings= ("load_zone", "period", "timepoint_label") +tuple(m.FUELS) +tuple(m.NON_FUEL_ENERGY_SOURCES) + +non_fuel_techs +tuple("curtail_"+s for s in m.NON_FUEL_ENERGY_SOURCES) +tuple(m.Zone_Power_Injections) +tuple(m.Zone_Power_Withdrawals) +("spinning_reserve_provision", "spinning_reserve_requirement") +("marginal_cost", "peak_day"), - values=lambda m, z, t: - (z, m.tp_period[t], m.tp_timestamp[t]) + values=lambda m, z, t: + (z, m.tp_period[t], m.tp_timestamp[t]) +tuple( sum( - DispatchGenByFuel(m, p, t, f) - for p in m.GENERATION_PROJECTS_BY_FUEL[f] + DispatchGenByFuel(m, p, t, f) + for p in m.GENS_BY_FUEL[f] if (p, t) in m.GEN_TPS and m.gen_load_zone[p] == z ) for f in m.FUELS @@ -213,6 +217,14 @@ def write_results(m, outputs_dir): ) for s in m.NON_FUEL_ENERGY_SOURCES ) + +tuple( + sum( + util.get(m.DispatchGen, (g, t), 0.0) + for g in m.GENS_BY_TECHNOLOGY[tech] + if m.gen_load_zone[g] == z + ) + for tech in non_fuel_techs + ) +tuple( sum( util.get(m.DispatchUpperLimit, (p, t), 0.0) - util.get(m.DispatchGen, (p, t), 0.0) @@ -224,20 +236,20 @@ def write_results(m, outputs_dir): +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Injections) +tuple(getattr(m, component)[z, t] for component in m.Zone_Power_Withdrawals) +( # save spinning reserve requirements and provisions; note: this assumes one zone per balancing area - (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) - if hasattr(m, 'Spinning_Reserve_Up_Requirements') + (spinning_reserve_provisions[m.zone_balancing_area[z], t], spinning_reserve_requirements[m.zone_balancing_area[z], t]) + if hasattr(m, 
'Spinning_Reserve_Up_Requirements') else (0.0, 0.0) ) +(util.get(m.dual, m.Zone_Energy_Balance[z, t], 0.0)/m.bring_timepoint_costs_to_base_year[t], # note: this uses 0.0 if no dual available, i.e., with glpk solver 'peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical') ) - + if hasattr(m, 'Spinning_Reserve_Up_Requirements') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module # write the reserve values util.write_table( m, m.BALANCING_AREAS, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "up_reserve_sources{t}.tsv".format(t=tag)), headings= ("balancing_area", "period", "timepoint_label") +tuple(m.FUELS) @@ -246,8 +258,8 @@ def write_results(m, outputs_dir): +tuple(m.Spinning_Reserve_Up_Requirements) +tuple("marginal_cost_"+rt for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS) +("peak_day",), - values=lambda m, ba, t: - (ba, m.tp_period[t], m.tp_timestamp[t]) + values=lambda m, ba, t: + (ba, m.tp_period[t], m.tp_timestamp[t]) +tuple( ( sum( @@ -255,7 +267,7 @@ def write_results(m, outputs_dir): sum( m.CommitGenSpinningReservesUp[rt, p, t] for rt in m.SPINNING_RESERVE_TYPES_FOR_GEN[p] - ) + ) # prorated by energy source used * DispatchGenByFuel(m, p, t, f) / m.DispatchGen[p, t] for p in m.GENS_BY_FUEL[f] @@ -283,22 +295,22 @@ def write_results(m, outputs_dir): ) +tuple( util.get( - m.dual, - util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), + m.dual, + util.get(m.Satisfy_Spinning_Reserve_Up_Requirement, (rt, ba, t), None), 0.0 # note: this uses 0.0 if no dual available, i.e., with glpk solver ) / m.bring_timepoint_costs_to_base_year[t] for rt in m.SPINNING_RESERVE_TYPES_FROM_GENS - ) + ) +(('peak' if m.ts_scale_to_year[m.tp_ts[t]] < avg_ts_scale else 'typical'),) ) sorted_projects = tuple(sorted(g for g in m.GENERATION_PROJECTS)) util.write_table( m, m.TIMEPOINTS, - output_file=os.path.join(outputs_dir, 
"gen_dispatch{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "gen_dispatch{t}.tsv".format(t=tag)), headings=("period", "timepoint_label")+sorted_projects, - values=lambda m, t: - (m.tp_period[t], m.tp_timestamp[t]) + values=lambda m, t: + (m.tp_period[t], m.tp_timestamp[t]) + tuple(util.get(m.DispatchGen, (p, t), 0.0) for p in sorted_projects) ) @@ -325,16 +337,16 @@ def gen_energy_source(g): for p in m.PERIODS: if start <= p <= end and value(m.GenCapacity[g, p]) > 0: operate_gen_in_period.add((g, p)) - + built_tech = tuple(sorted(set(m.gen_tech[g] for g in built_gens))) built_energy_source = tuple(sorted(set(gen_energy_source(g) for g in built_gens))) - + battery_capacity_mw = lambda m, z, pe: ( (m.Battery_Capacity[z, pe] / m.battery_min_discharge_time) if hasattr(m, "Battery_Capacity") else 0.0 ) - - util.write_table(m, m.LOAD_ZONES, m.PERIODS, + + util.write_table(m, m.LOAD_ZONES, m.PERIODS, output_file=os.path.join(outputs_dir, "capacity_by_technology{t}.tsv".format(t=tag)), headings=("load_zone", "period") + built_tech + ("hydro", "batteries", "fuel cells"), values=lambda m, z, pe: (z, pe,) + tuple( @@ -350,7 +362,7 @@ def gen_energy_source(g): m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) ) - util.write_table(m, m.LOAD_ZONES, m.PERIODS, + util.write_table(m, m.LOAD_ZONES, m.PERIODS, output_file=os.path.join(outputs_dir, "capacity_by_energy_source{t}.tsv".format(t=tag)), headings=("load_zone", "period") + built_energy_source + ("hydro", "batteries", "fuel cells"), values=lambda m, z, pe: (z, pe,) + tuple( @@ -366,8 +378,8 @@ def gen_energy_source(g): m.FuelCellCapacityMW[z, pe] if hasattr(m, "FuelCellCapacityMW") else 0 ) ) - - util.write_table(m, m.LOAD_ZONES, m.PERIODS, + + util.write_table(m, m.LOAD_ZONES, m.PERIODS, output_file=os.path.join(outputs_dir, "production_by_technology{t}.tsv".format(t=tag)), headings=("load_zone", "period") + built_tech, values=lambda m, z, pe: (z, pe,) + tuple( @@ -503,7 +515,7 @@ def 
gen_energy_source(g): # + (('ice_annual_fuel_cost',) if hasattr(m, 'ice_annual_fuel_cost') else ()), # values=cost_breakdown_details # ) - + # util.write_table(m, m.PERIODS, # output_file=os.path.join(outputs_dir, "capacity{t}.tsv".format(t=t)), # headings=("period",)+built_gens, @@ -513,11 +525,11 @@ def gen_energy_source(g): if hasattr(m, 'RFMSupplyTierActivate'): util.write_table(m, m.RFM_SUPPLY_TIERS, - output_file=os.path.join(outputs_dir, "rfm_activate{t}.tsv".format(t=tag)), + output_file=os.path.join(outputs_dir, "rfm_activate{t}.tsv".format(t=tag)), headings=("market", "period", "tier", "activate"), values=lambda m, r, p, st: (r, p, st, m.RFMSupplyTierActivate[r, p, st]) ) - + # import pprint # b=[(g, pe, value(m.BuildGen[g, pe]), m.gen_tech[g], m.gen_overnight_cost[g, pe]) for (g, pe) in m.BuildGen if value(m.BuildGen[g, pe]) > 0] From 17e8088f89c057f85e1b3a7ff811ec9f2fbeccbd Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 26 Aug 2018 14:13:47 -1000 Subject: [PATCH 16/38] Minimize changes instead of squared values hawaii.smooth_dispatch previously smoothed the hourly dispatch profile for slack elements (storage, demand response, EVs) by minimizing the sum of the squares of the hourly values of these elements. This requires a quadratic solver and is difficult to setup with hawaii.ev_advanced, which defines EV charging as a convex combination of other decision variables. With this commit, we instead switch to minimizing the hour- to-hour variation of these variables (actually the total upward variation over the study period), which is linear, faster to solve, and works about as well. 
--- switch_model/hawaii/smooth_dispatch.py | 101 +++++++++++++++---------- 1 file changed, 60 insertions(+), 41 deletions(-) diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index de7eca24e..7dce76dd8 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -1,8 +1,9 @@ -"""Minimize excess renewable production (dissipated in transmission losses) and -smooth out demand response and EV charging as much as possible.""" +"""Minimize excess renewable production (dissipated in transmission and battery +losses) and smooth out demand response and EV charging as much as possible.""" from pyomo.environ import * import switch_model.solve +from switch_model.utilities import iteritems def define_components(m): if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): @@ -16,15 +17,56 @@ def define_components(m): # add an alternative objective function that smoothes out time-shiftable energy sources and sinks if m.options.smooth_dispatch: - if hasattr(m, 'ChargeEVs') and isinstance(m.ChargeEVs, Expression): - # Create a variable bound to the ChargeEVs expression - # that can be squared in the objective function without creating - # a non-positive-definite problem. - m.ChargeEVsVar = Var(m.ChargeEVs.index_set()) - m.ChargeEVsVar_fix = Constraint( - m.ChargeEVs.index_set(), - rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key] - ) + # minimize the range of variation of various slack responses; + # these should each have timepoint as their final index component + components_to_smooth = [ + 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', 'ChargeEVs', + 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', + ] + + def add_smoothing_entry(m, d, component, key): + """ + Add an entry to the dictionary d of elements to smooth. 
The entry's + key is based on component name and specified key, and its value is + an expression whose absolute value should be minimized to smooth the + model. The last element of the provided key must be a timepoint, and + the expression is equal to the value of the component at this + timepoint minus its value at the previous timepoint. + """ + tp = key[-1] + prev_tp = m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp) + entry_key = str((component,) + key) + entry_val = component[key] - component[key[:-1]+(prev_tp,)] + d[entry_key] = entry_val + + def rule(m): + m.component_smoothing_dict = dict() + """Find all components to be smoothed""" + # smooth named components + for c in components_to_smooth: + try: + comp = getattr(m, c) + except AttributeError: + continue + print "Will smooth {}.".format(c) + for key in comp: + add_smoothing_entry(m, m.component_smoothing_dict, comp, key) + # smooth standard storage generators + if hasattr(m, 'STORAGE_GEN_TPS'): + print "Will smooth charging and discharging of standard storage." 
+ for c in ['ChargeStorage', 'DispatchGen']: + comp = getattr(m, c) + for key in m.STORAGE_GEN_TPS: + add_smoothing_entry(m, m.component_smoothing_dict, comp, key) + m.make_component_smoothing_dict = BuildAction(rule=rule) + + # Force IncreaseSmoothedValue to equal any step-up in a smoothed value + m.ISV_INDEX = Set(initialize=lambda m: m.component_smoothing_dict.keys()) + m.IncreaseSmoothedValue = Var(m.ISV_INDEX, within=NonNegativeReals) + m.Calculate_IncreaseSmoothedValue = Constraint( + m.ISV_INDEX, + rule=lambda m, k: m.IncreaseSmoothedValue[k] >= m.component_smoothing_dict[k] + ) def Smooth_Free_Variables_obj_rule(m): # minimize production (i.e., maximize curtailment / minimize losses) @@ -33,28 +75,6 @@ def Smooth_Free_Variables_obj_rule(m): for z in m.LOAD_ZONES for t in m.TIMEPOINTS for component in m.Zone_Power_Injections) - - # minimize the variability of various slack responses - components_to_smooth = [ - 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', - 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', - ] - if hasattr(m, 'ChargeEVsVar'): - components_to_smooth.append('ChargeEVsVar') - else: - components_to_smooth.append('ChargeEVs') - - for var in components_to_smooth: - if hasattr(m, var): - if m.options.verbose: - print "Will smooth {}.".format(var) - comp = getattr(m, var) - obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) - # include standard storage generators too - if hasattr(m, 'STORAGE_GEN_TPS'): - print "Will smooth charging and discharging of standard storage." 
- obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) - obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables # (e.g., via storage) and (b) give better representation of the amount of reserves actually available if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module @@ -66,18 +86,17 @@ def Smooth_Free_Variables_obj_rule(m): reserve_weight.get(rt, 1.0) * component[rt, ba, tp] for rt, ba, tp in component ) + # minimize absolute value of changes in the smoothed variables + obj += sum(v for v in m.IncreaseSmoothedValue.values()) return obj m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + + # constrain smoothing objective to find unbounded ray + m.Bound_Obj = Constraint(rule=lambda m: Smooth_Free_Variables_obj_rule(m) <= 1e9) + # leave standard objective in effect for now m.Smooth_Free_Variables.deactivate() - # def Fix_Obj_rule(m): - # # import pdb; pdb.set_trace() - # # make sure the minimum-cost objective is in effect - # # not sure if this is needed, and not sure - # m.Smooth_Free_Variables.deactivate() - # m.Minimize_System_Cost.activate() - # m.Fix_Obj = BuildAction(rule=Fix_Obj_rule) def pre_iterate(m): if m.options.smooth_dispatch: @@ -190,6 +209,6 @@ def fix_obj_expression(e, status=True): pass else: raise ValueError( - 'Expression {e} does not have an exg, fixed or _args property, ' + + 'Expression {e} does not have an expr, fixed or _args property, ' + 'so it cannot be fixed.'.format(e=e) ) From d27234dee43c1e9100e056e06d9b4b44b2fc4ab7 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 26 Aug 2018 15:01:54 -1000 Subject: [PATCH 17/38] Move old quadratic smoothing method to separate module --- .../hawaii/smooth_dispatch_quadratic.py | 195 ++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 
100644 switch_model/hawaii/smooth_dispatch_quadratic.py diff --git a/switch_model/hawaii/smooth_dispatch_quadratic.py b/switch_model/hawaii/smooth_dispatch_quadratic.py new file mode 100644 index 000000000..de7eca24e --- /dev/null +++ b/switch_model/hawaii/smooth_dispatch_quadratic.py @@ -0,0 +1,195 @@ +"""Minimize excess renewable production (dissipated in transmission losses) and +smooth out demand response and EV charging as much as possible.""" + +from pyomo.environ import * +import switch_model.solve + +def define_components(m): + if m.options.solver in ('cplex', 'cplexamp', 'gurobi', 'gurobi_ampl'): + m.options.smooth_dispatch = True + else: + # glpk and cbc can't handle quadratic problem used for smoothing + m.options.smooth_dispatch = False + if m.options.verbose: + print "Not smoothing dispatch because {} cannot solve a quadratic model.".format(m.options.solver) + print "Remove hawaii.smooth_dispatch from modules.txt and iterate.txt to avoid this message." + + # add an alternative objective function that smoothes out time-shiftable energy sources and sinks + if m.options.smooth_dispatch: + if hasattr(m, 'ChargeEVs') and isinstance(m.ChargeEVs, Expression): + # Create a variable bound to the ChargeEVs expression + # that can be squared in the objective function without creating + # a non-positive-definite problem. 
+ m.ChargeEVsVar = Var(m.ChargeEVs.index_set()) + m.ChargeEVsVar_fix = Constraint( + m.ChargeEVs.index_set(), + rule=lambda m, *key: m.ChargeEVsVar[key] == m.ChargeEVs[key] + ) + + def Smooth_Free_Variables_obj_rule(m): + # minimize production (i.e., maximize curtailment / minimize losses) + obj = sum( + getattr(m, component)[z, t] + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + for component in m.Zone_Power_Injections) + + # minimize the variability of various slack responses + components_to_smooth = [ + 'ShiftDemand', 'ChargeBattery', 'DischargeBattery', + 'RunElectrolyzerMW', 'LiquifyHydrogenMW', 'DispatchFuelCellMW', + ] + if hasattr(m, 'ChargeEVsVar'): + components_to_smooth.append('ChargeEVsVar') + else: + components_to_smooth.append('ChargeEVs') + + for var in components_to_smooth: + if hasattr(m, var): + if m.options.verbose: + print "Will smooth {}.".format(var) + comp = getattr(m, var) + obj += sum(comp[z, t]*comp[z, t] for z in m.LOAD_ZONES for t in m.TIMEPOINTS) + # include standard storage generators too + if hasattr(m, 'STORAGE_GEN_TPS'): + print "Will smooth charging and discharging of standard storage." + obj += sum(m.ChargeStorage[g, tp]*m.ChargeStorage[g, tp] for g, tp in m.STORAGE_GEN_TPS) + obj += sum(m.DispatchGen[g, tp]*m.DispatchGen[g, tp] for g, tp in m.STORAGE_GEN_TPS) + # also maximize up reserves, which will (a) minimize arbitrary burning off of renewables + # (e.g., via storage) and (b) give better representation of the amount of reserves actually available + if hasattr(m, 'Spinning_Reserve_Up_Provisions') and hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): # advanced module + print "Will maximize provision of up reserves." 
+ reserve_weight = {'contingency': 0.9, 'regulation': 1.1} + for comp_name in m.Spinning_Reserve_Up_Provisions: + component = getattr(m, comp_name) + obj += -0.1 * sum( + reserve_weight.get(rt, 1.0) * component[rt, ba, tp] + for rt, ba, tp in component + ) + return obj + m.Smooth_Free_Variables = Objective(rule=Smooth_Free_Variables_obj_rule, sense=minimize) + # leave standard objective in effect for now + m.Smooth_Free_Variables.deactivate() + + # def Fix_Obj_rule(m): + # # import pdb; pdb.set_trace() + # # make sure the minimum-cost objective is in effect + # # not sure if this is needed, and not sure + # m.Smooth_Free_Variables.deactivate() + # m.Minimize_System_Cost.activate() + # m.Fix_Obj = BuildAction(rule=Fix_Obj_rule) + +def pre_iterate(m): + if m.options.smooth_dispatch: + if m.iteration_number == 0: + # indicate that this was run in iterated mode, so no need for post-solve + m.iterated_smooth_dispatch = True + elif m.iteration_number == 1: + pre_smooth_solve(m) + else: + raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + + return None # no comment on convergence + +def post_iterate(m): + if hasattr(m, "ChargeBattery"): + double_charge = [ + ( + z, t, + m.ChargeBattery[z, t].value, + m.DischargeBattery[z, t].value + ) + for z in m.LOAD_ZONES + for t in m.TIMEPOINTS + if m.ChargeBattery[z, t].value > 0 + and m.DischargeBattery[z, t].value > 0 + ] + if len(double_charge) > 0: + print "" + print "WARNING: batteries are simultaneously charged and discharged in some hours." + print "This is usually done to relax the biofuel limit." 
+ for (z, t, c, d) in double_charge: + print 'ChargeBattery[{z}, {t}]={c}, DischargeBattery[{z}, {t}]={d}'.format( + z=z, t=m.tp_timestamp[t], + c=c, d=d + ) + + if m.options.smooth_dispatch: + # setup model for next iteration + if m.iteration_number == 0: + done = False # we'll have to run again to do the smoothing + elif m.iteration_number == 1: + # finished smoothing the model + post_smooth_solve(m) + # now we're done + done = True + else: + raise RuntimeError("Reached unexpected iteration number {} in module {}.".format(m.iteration_number, __name__)) + else: + # not smoothing the dispatch + done = True + + return done + +def post_solve(m, outputs_dir): + """ Smooth dispatch if it wasn't already done during an iterative solution. """ + if m.options.smooth_dispatch and not getattr(m, 'iterated_smooth_dispatch', False): + pre_smooth_solve(m) + # re-solve and load results + m.preprocess() + switch_model.solve.solve(m) + post_smooth_solve(m) + +def pre_smooth_solve(m): + """ store model state and prepare for smoothing """ + save_duals(m) + fix_obj_expression(m.Minimize_System_Cost) + m.Minimize_System_Cost.deactivate() + m.Smooth_Free_Variables.activate() + print "smoothing free variables..." + +def post_smooth_solve(m): + """ restore original model state """ + # restore the standard objective + m.Smooth_Free_Variables.deactivate() + m.Minimize_System_Cost.activate() + # unfix the variables + fix_obj_expression(m.Minimize_System_Cost, False) + # restore any duals from the original solution + restore_duals(m) + + +def save_duals(m): + if hasattr(m, 'dual'): + m.old_dual_dict = m.dual._dict.copy() + if hasattr(m, 'rc'): + m.old_rc_dict = m.rc._dict.copy() + +def restore_duals(m): + if hasattr(m, 'dual'): + m.dual._dict = m.old_dual_dict + if hasattr(m, 'rc'): + m.rc._dict = m.old_rc_dict + +def fix_obj_expression(e, status=True): + """Recursively fix all variables included in an objective expression.""" + if hasattr(e, 'fixed'): + e.fixed = status # see p. 
171 of the Pyomo book + elif hasattr(e, '_numerator'): + for e2 in e._numerator: + fix_obj_expression(e2, status) + for e2 in e._denominator: + fix_obj_expression(e2, status) + elif hasattr(e, '_args'): + for e2 in e._args: + fix_obj_expression(e2, status) + elif hasattr(e, 'expr'): + fix_obj_expression(e.expr, status) + elif hasattr(e, 'is_constant'): + # parameter; we don't actually care if it's mutable or not + pass + else: + raise ValueError( + 'Expression {e} does not have an expr, fixed or _args property, ' + 'so it cannot be fixed.'.format(e=e) + ) From a8a707f6f72376bb366837389bdd715fba663a13 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 26 Aug 2018 19:02:11 -1000 Subject: [PATCH 18/38] Bug-fix key for smoothing variable --- switch_model/hawaii/smooth_dispatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index 7dce76dd8..bb9e7ca79 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -35,7 +35,7 @@ def add_smoothing_entry(m, d, component, key): """ tp = key[-1] prev_tp = m.TPS_IN_TS[m.tp_ts[tp]].prevw(tp) - entry_key = str((component,) + key) + entry_key = str((component.name,) + key) entry_val = component[key] - component[key[:-1]+(prev_tp,)] d[entry_key] = entry_val From e76b19c0e35df1e646bf2468ce01cedcd584e6ad Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 27 Aug 2018 09:21:18 -1000 Subject: [PATCH 19/38] Work around infeasible models in hawaii.smooth_dispatch --- switch_model/hawaii/smooth_dispatch.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/switch_model/hawaii/smooth_dispatch.py b/switch_model/hawaii/smooth_dispatch.py index bb9e7ca79..0fdd7478e 100644 --- a/switch_model/hawaii/smooth_dispatch.py +++ b/switch_model/hawaii/smooth_dispatch.py @@ -156,7 +156,7 @@ def post_solve(m, outputs_dir): pre_smooth_solve(m) # re-solve and load results 
m.preprocess() - switch_model.solve.solve(m) + solve(m) post_smooth_solve(m) def pre_smooth_solve(m): @@ -167,6 +167,16 @@ def pre_smooth_solve(m): m.Smooth_Free_Variables.activate() print "smoothing free variables..." +def solve(m): + try: + switch_model.solve.solve(m) + except RuntimeError as e: + if e.message.lower() == 'infeasible model': + # show a warning, but don't abort the overall post_solve process + print('WARNING: model became infeasible when smoothing; reverting to original solution.') + else: + raise + def post_smooth_solve(m): """ restore original model state """ # restore the standard objective From 995d58b80a654606a7dd79ebaafc45549eadceaa Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 27 Aug 2018 10:35:51 -1000 Subject: [PATCH 20/38] Report total production by energy source in hawaii.save_results This also adds ad-hoc technologies to production_by_technology.tsv. --- switch_model/hawaii/save_results.py | 76 ++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 8 deletions(-) diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index af1fc2ace..785942d64 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -185,6 +185,11 @@ def write_results(m, outputs_dir): # get a list of non-fuel technologies, to allow disaggregation by type non_fuel_techs = tuple(sorted(set(m.gen_tech[g] for g in m.NON_FUEL_BASED_GENS))) + # get a list of ad-hoc technologies (not included in standard generation projects) + ad_hoc_sources = tuple( + s for s in m.Zone_Power_Injections + if s not in {'ZoneTotalCentralDispatch', 'ZoneTotalDistributedDispatch'} + ) avg_ts_scale = float(sum(m.ts_scale_to_year[ts] for ts in m.TIMESERIES))/len(m.TIMESERIES) util.write_table( m, m.LOAD_ZONES, m.TIMEPOINTS, @@ -381,17 +386,72 @@ def gen_energy_source(g): util.write_table(m, m.LOAD_ZONES, m.PERIODS, output_file=os.path.join(outputs_dir, "production_by_technology{t}.tsv".format(t=tag)), - 
headings=("load_zone", "period") + built_tech, - values=lambda m, z, pe: (z, pe,) + tuple( - sum( - m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh - for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z - for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] + headings=("load_zone", "period") + built_tech + ad_hoc_sources, + values=lambda m, z, pe: + (z, pe,) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in built_gens if m.gen_tech[g] == t and m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] + ) + for t in built_tech + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. + sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] + ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] + ) + ) + + # option 1: make separate tables of production_by_technology and production_by_energy_source, + # and use columns from production_by_technology to replace corresponding columns from + # production_by_energy_source in order to disaggregate certain energy sources. + # option 2: make a single table that shows production by technology and energy source + # (sub slices); but this has to either have two heading levels or concatenate them or + # use a database format rather than a table format, which will then require post-processing + # by pandas or an Excel pivot table. + # For now, we go with option 1. 
+ util.write_table(m, m.LOAD_ZONES, m.PERIODS, + output_file=os.path.join(outputs_dir, "production_by_energy_source{t}.tsv".format(t=tag)), + headings= + ("load_zone", "period") + + tuple(m.FUELS) + + tuple(m.NON_FUEL_ENERGY_SOURCES) + + ad_hoc_sources, + values=lambda m, z, pe: + (z, pe,) + + tuple( + sum( + DispatchGenByFuel(m, g, tp, f) * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in m.GENS_BY_FUEL[f] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] + ) + for f in m.FUELS + ) + + tuple( + sum( + m.DispatchGen[g, tp] * m.tp_weight_in_year[tp] * 0.001 # MWh -> GWh + for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE[s] + if m.gen_load_zone[g] == z + for tp in m.TPS_FOR_GEN_IN_PERIOD[g, pe] + ) + for s in m.NON_FUEL_ENERGY_SOURCES + ) + + tuple( # ad hoc techs: hydrogen, pumped storage, etc. + sum( + comp[z, tp] * m.tp_weight_in_year[tp] * 0.001 + for tp in m.TPS_IN_PERIOD[pe] + ) + for comp in [getattr(m, cname) for cname in ad_hoc_sources] ) - for t in built_tech - ) # TODO: add hydro and hydrogen ) + + # def cost_breakdown_details(m, z, pe): # values = [z, pe] # # capacity built, conventional plants From 7474b81ca0193832dfeb75851be5ec1c32303b80 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sun, 9 Sep 2018 09:11:34 -1000 Subject: [PATCH 21/38] Fix Hawaii storage energy cost calculation --- switch_model/hawaii/scenario_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/switch_model/hawaii/scenario_data.py b/switch_model/hawaii/scenario_data.py index 3fbd2c47c..1f5de82e6 100644 --- a/switch_model/hawaii/scenario_data.py +++ b/switch_model/hawaii/scenario_data.py @@ -441,7 +441,7 @@ def write_tables(**args): c.capital_cost_per_kw * 1000.0 * power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.base_year) AS gen_overnight_cost, - c.capital_cost_per_kwh AS gen_storage_energy_overnight_cost, + c.capital_cost_per_kwh * 1000.0 AS gen_storage_energy_overnight_cost, c.fixed_o_m * 1000.0 * power(1.0+%(inflation_rate)s, 
%(base_financial_year)s-i.base_year) AS gen_fixed_o_m, i.min_vintage_year -- used for build_year filter below From 38c87f1d857966820bbd7551eb411c5635fa3e1c Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 20 Sep 2018 15:11:10 -1000 Subject: [PATCH 22/38] Fix cases where 'proj'ect was renamed to 'g'ect --- .../generators/core/commit/operate.py | 2 +- switch_model/hawaii/lng_conversion.py | 8 ++-- switch_model/hawaii/pumped_hydro.py | 43 +++++++++---------- switch_model/hawaii/rps.py | 2 +- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/switch_model/generators/core/commit/operate.py b/switch_model/generators/core/commit/operate.py index 1638b2995..2275231e4 100644 --- a/switch_model/generators/core/commit/operate.py +++ b/switch_model/generators/core/commit/operate.py @@ -23,7 +23,7 @@ def define_components(mod): """ Adds components to a Pyomo abstract model object to describe - unit commitment for gects. Unless otherwise stated, all power + unit commitment for projects. Unless otherwise stated, all power capacity is specified in units of MW and all sets and parameters are mandatory. 
diff --git a/switch_model/hawaii/lng_conversion.py b/switch_model/hawaii/lng_conversion.py index 15a2f0fa0..c8079b64c 100644 --- a/switch_model/hawaii/lng_conversion.py +++ b/switch_model/hawaii/lng_conversion.py @@ -180,8 +180,8 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # # force LNG-capable plants to use only LNG until they exhaust all active tiers # # note: we assume no single project can produce more than # # 1500 MW from LNG at 10 MMBtu/MWh heat rate - # big_gect_mw = 1500 # MW - # big_gect_lng = big_gect_mw * 10 # MMBtu/hour + # big_project_mw = 1500 # MW + # big_project_lng = big_project_mw * 10 # MMBtu/hour # def Only_LNG_In_Converted_Plants_rule(m, g, tp): # if g not in m.LNG_CONVERTED_PLANTS: # return Constraint.Skip @@ -193,7 +193,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # ) # rfm = m.zone_rfm[m.gen_load_zone[g], 'LNG'] # lng_market_exhausted = 1 - m.LNG_Has_Slack[rfm, m.tp_period[tp]] - # return (non_lng_fuel <= big_gect_lng * lng_market_exhausted) + # return (non_lng_fuel <= big_project_lng * lng_market_exhausted) # m.Only_LNG_In_Converted_Plants = Constraint( # m.LNG_GEN_TIMEPOINTS, # rule=Only_LNG_In_Converted_Plants_rule @@ -222,7 +222,7 @@ def Force_LNG_Tier_rule(m, rfm, per, tier): # m.DispatchGen[g, tp] # >= # m.DispatchUpperLimit[g, tp] - # - big_gect_mw * lng_market_exhausted + # - big_project_mw * lng_market_exhausted # ) # return rule # m.Force_Converted_Plants_On = Constraint( diff --git a/switch_model/hawaii/pumped_hydro.py b/switch_model/hawaii/pumped_hydro.py index d9dfad985..63e4d287e 100644 --- a/switch_model/hawaii/pumped_hydro.py +++ b/switch_model/hawaii/pumped_hydro.py @@ -6,36 +6,36 @@ def define_arguments(argparser): argparser.add_argument("--ph-mw", type=float, default=None, help="Force construction of a certain total capacity of pumped storage hydro during one or more periods chosen by SWITCH") argparser.add_argument("--ph-year", type=int, default=None, - help="Force all pumped storage hydro to be constructed during 
one particular year (must be in the list of periods)") + help="Force all pumped storage hydro to be constructed during one particular year (must be in the list of periods)") def define_components(m): - + m.PH_GENS = Set() - + m.ph_load_zone = Param(m.PH_GENS) - + m.ph_capital_cost_per_mw = Param(m.PH_GENS, within=NonNegativeReals) - m.ph_gect_life = Param(m.PH_GENS, within=NonNegativeReals) - + m.ph_project_life = Param(m.PH_GENS, within=NonNegativeReals) + # annual O&M cost for pumped hydro project, percent of capital cost m.ph_fixed_om_percent = Param(m.PH_GENS, within=NonNegativeReals) - + # total annual cost m.ph_fixed_cost_per_mw_per_year = Param(m.PH_GENS, initialize=lambda m, p: - m.ph_capital_cost_per_mw[p] * - (crf(m.interest_rate, m.ph_gect_life[p]) + m.ph_fixed_om_percent[p]) + m.ph_capital_cost_per_mw[p] * + (crf(m.interest_rate, m.ph_project_life[p]) + m.ph_fixed_om_percent[p]) ) - + # round-trip efficiency of the pumped hydro facility m.ph_efficiency = Param(m.PH_GENS) - + # average energy available from water inflow each day # (system must balance energy net of this each day) m.ph_inflow_mw = Param(m.PH_GENS) - + # maximum size of pumped hydro project m.ph_max_capacity_mw = Param(m.PH_GENS) - + # How much pumped hydro to build m.BuildPumpedHydroMW = Var(m.PH_GENS, m.PERIODS, within=NonNegativeReals) m.Pumped_Hydro_Proj_Capacity_MW = Expression(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: @@ -43,7 +43,7 @@ def define_components(m): ) # flag indicating whether any capacity is added to each project each year - m.BuildAnyPumpedHydro = Var(m.PH_GENS, m.PERIODS, within=Binary) + m.BuildAnyPumpedHydro = Var(m.PH_GENS, m.PERIODS, within=Binary) # How to run pumped hydro m.PumpedHydroProjGenerateMW = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) @@ -55,7 +55,7 @@ def define_components(m): m.Pumped_Hydro_Max_Build = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: m.Pumped_Hydro_Proj_Capacity_MW[g, pe] <= m.ph_max_capacity_mw[g] ) - + # force the 
build flag on for the year(s) when pumped hydro is built m.Pumped_Hydro_Set_Build_Flag = Constraint(m.PH_GENS, m.PERIODS, rule=lambda m, g, pe: m.BuildPumpedHydroMW[g, pe] <= m.BuildAnyPumpedHydro[g, pe] * m.ph_max_capacity_mw[g] @@ -70,7 +70,7 @@ def define_components(m): # m.Deactivate_Pumped_Hydro_Build_All_Or_None = BuildAction(rule=lambda m: # m.Pumped_Hydro_Build_All_Or_None.deactivate() # ) - + # limits on pumping and generation m.Pumped_Hydro_Max_Generate_Rate = Constraint(m.PH_GENS, m.TIMEPOINTS, rule=lambda m, g, t: m.PumpedHydroProjGenerateMW[g, t] @@ -100,22 +100,22 @@ def define_components(m): m.StorePumpedHydro = Expression(m.LOAD_ZONES, m.TIMEPOINTS, rule=lambda m, z, t: sum(m.PumpedHydroProjStoreMW[g, t] for g in m.PH_GENS if m.ph_load_zone[g]==z) ) - + # calculate costs m.Pumped_Hydro_Fixed_Cost_Annual = Expression(m.PERIODS, rule=lambda m, pe: sum(m.ph_fixed_cost_per_mw_per_year[g] * m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS) ) m.Cost_Components_Per_Period.append('Pumped_Hydro_Fixed_Cost_Annual') - + # add pumped hydro to zonal energy balance m.Zone_Power_Injections.append('GeneratePumpedHydro') m.Zone_Power_Withdrawals.append('StorePumpedHydro') - + # total pumped hydro capacity in each zone each period (for reporting) m.Pumped_Hydro_Capacity_MW = Expression(m.LOAD_ZONES, m.PERIODS, rule=lambda m, z, pe: sum(m.Pumped_Hydro_Proj_Capacity_MW[g, pe] for g in m.PH_GENS if m.ph_load_zone[g]==z) ) - + # force construction of a fixed amount of pumped hydro if m.options.ph_mw is not None: print "Forcing construction of {m} MW of pumped hydro.".format(m=m.options.ph_mw) @@ -139,6 +139,5 @@ def load_inputs(m, switch_data, inputs_dir): autoselect=True, index=m.PH_GENS, param=( - m.ph_load_zone, m.ph_capital_cost_per_mw, m.ph_gect_life, m.ph_fixed_om_percent, + m.ph_load_zone, m.ph_capital_cost_per_mw, m.ph_project_life, m.ph_fixed_om_percent, m.ph_efficiency, m.ph_inflow_mw, m.ph_max_capacity_mw)) - \ No newline at end of file diff --git 
a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 277f93839..0d8e5d451 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -664,7 +664,7 @@ def advanced1_DispatchGenRenewableMW(m): # omit binary variables and big-m constraints if len(m.FUELS_FOR_GEN[p]) == 1 # (assign all production to the single fuel) # use m.GenFuelUseRate[g, t, f] / m.gen_full_load_heat_rate[g] - # for gects with no heat rate curve and no startup fuel + # for projects with no heat rate curve and no startup fuel # note: a continuous, quadratic version of this function can be created as follows: # - make DispatchFuelFlag a PercentFraction instead of Binary From 6384d0032daf9091d6a6f752fc3575cc8a15daeb Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 20 Sep 2018 15:12:32 -1000 Subject: [PATCH 23/38] Formalize limits on load-shifting between hours --- switch_model/hawaii/demand_response_simple.py | 44 ++++++++++++------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/switch_model/hawaii/demand_response_simple.py b/switch_model/hawaii/demand_response_simple.py index a4bd8aa94..140b2b5bc 100644 --- a/switch_model/hawaii/demand_response_simple.py +++ b/switch_model/hawaii/demand_response_simple.py @@ -1,29 +1,42 @@ +from __future__ import division import os from pyomo.environ import * from switch_model.financials import capital_recovery_factor as crf def define_arguments(argparser): - argparser.add_argument('--demand-response-share', type=float, default=0.30, + argparser.add_argument('--demand-response-share', type=float, default=0.30, help="Fraction of hourly load that can be shifted to other times of day (default=0.30)") - argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'], + argparser.add_argument('--demand-response-reserve-types', nargs='+', default=['spinning'], help= "Type(s) of reserves to provide from demand response (e.g., 'contingency' or 'regulation'). " "Specify 'none' to disable." 
) def define_components(m): - + # maximum share of hourly load that can be rescheduled # this is mutable so various values can be tested m.demand_response_max_share = Param(default=m.options.demand_response_share, mutable=True) + # maximum amount of load that can be _added_ each hour; we assume + # it is 8x the maximum reduction, which is roughly equivalent to + # concentrating the shifted load into a 3-hour period. + # Note: before 9/12/18, we didn't enforce this for scheduling, but did + # apply it when calculating down reserve provision, which meant we would + # give negative down reserves when shifted demand exceeded this quantity, + # which would have to come from somewhere else. + m.demand_response_max_increase = Param( + rule=lambda m: m.demand_response_max_share * 24 / 3 + ) + # adjustment to demand during each hour (positive = higher demand) - m.ShiftDemand = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, bounds=lambda m, z, t: - ( - (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], - # assume all shiftable load can be concentrated into 3 hours (no less) - None # 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, t] - ) + m.ShiftDemand = Var( + m.LOAD_ZONES, m.TIMEPOINTS, within=Reals, + bounds=lambda m, z, t: + ( + (-1.0) * m.demand_response_max_share * m.zone_demand_mw[z, t], + m.demand_response_max_increase * m.zone_demand_mw[z, t] + ) ) # all changes to demand must balance out over the course of the day @@ -47,8 +60,9 @@ def define_components(m): ) m.DemandResponseSlackDown = Expression(m.BALANCING_AREA_TIMEPOINTS, rule=lambda m, b, tp: sum( - # Assume shiftable load can only be raised by factor of 8 (i.e., concentrate in 3 hours) - 24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp] - m.ShiftDemand[z, tp] + # difference between scheduled load and max allowed + m.demand_response_max_increase * m.zone_demand_mw[z, tp] + - m.ShiftDemand[z, tp] for z in m.ZONES_IN_BALANCING_AREA[b] ) ) @@ -69,16 +83,16 @@ def 
define_components(m): ) # constrain reserve provision within available slack m.Limit_DemandResponseSpinningReserveUp = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.DemandResponseSpinningReserveUp[rt, ba, tp] for rt in m.DR_SPINNING_RESERVE_TYPES ) <= m.DemandResponseSlackUp[ba, tp] ) m.Limit_DemandResponseSpinningReserveDown = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.DemandResponseSpinningReserveDown[rt, ba, tp] for rt in m.DR_SPINNING_RESERVE_TYPES From 7f0b22adc6f967db627d7a7f564d825133318e1c Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 20 Sep 2018 15:13:24 -1000 Subject: [PATCH 24/38] Add --rps-prefer-dist-pv option --- switch_model/hawaii/rps.py | 46 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/switch_model/hawaii/rps.py b/switch_model/hawaii/rps.py index 0d8e5d451..292043e74 100644 --- a/switch_model/hawaii/rps.py +++ b/switch_model/hawaii/rps.py @@ -22,6 +22,8 @@ def define_arguments(argparser): help="Don't allow any new wind capacity except to replace existing capacity.") argparser.add_argument('--rps-no-wind', action='store_true', default=False, help="Don't allow any new wind capacity or replacement of existing capacity.") + argparser.add_argument('--rps-prefer-dist-pv', action='store_true', default=False, + help="Don't allow any new large solar capacity unless 90%% of distributed PV ('*DistPV') capacity has been developed.") argparser.add_argument( '--rps-allocation', default=None, choices=[ @@ -82,6 +84,16 @@ def rps_target_for_period_rule(m, p): # m.rps_fuel_limit = Param(default=float("inf"), mutable=True) m.rps_fuel_limit = Param(initialize=m.options.biofuel_limit, mutable=True) + # calculate amount of pre-existing capacity in each generation project; + # used when we want to restrict expansion + 
m.gen_pre_existing_capacity = Expression( + m.GENERATION_PROJECTS, + rule=lambda m, g: ( + m.GenCapacity[g, m.PERIODS.first()] + - get(m.BuildGen, (g, m.PERIODS.first()), 0) + ) + ) + # Define DispatchGenRenewableMW, which shows the amount of power produced # by each project from each fuel during each time step. define_DispatchGenRenewableMW(m) @@ -137,7 +149,7 @@ def rps_target_for_period_rule(m, p): # (doesn't ban use of biofuels in existing or multi-fuel projects, but that could # be done with --biofuel-limit 0) m.No_New_Renewables = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) if m.gen_energy_source[g] in m.RPS_ENERGY_SOURCES else Constraint.Skip ) @@ -146,7 +158,7 @@ def rps_target_for_period_rule(m, p): if m.options.rps_no_new_wind: # limit wind to existing capacity m.No_New_Wind = Constraint(m.NEW_GEN_BLD_YRS, rule=lambda m, g, bld_yr: - (m.GenCapacity[g, bld_yr] <= m.GenCapacity[g, m.PERIODS.first()] - m.BuildGen[g, m.PERIODS.first()]) + (m.GenCapacity[g, bld_yr] <= m.gen_pre_existing_capacity[g]) if m.gen_energy_source[g] in wind_energy_sources else Constraint.Skip ) @@ -158,6 +170,36 @@ def rps_target_for_period_rule(m, p): else Constraint.Skip ) + if m.options.rps_prefer_dist_pv: + m.DIST_PV_GENS = Set(initialize=lambda m: [ + g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] + if 'DistPV' in m.gen_tech[g] + ]) + m.LARGE_PV_GENS = Set(initialize=lambda m: [ + g for g in m.GENS_BY_NON_FUEL_ENERGY_SOURCE['SUN'] + if g not in m.DIST_PV_GENS + ]) + # LargePVAllowed must be 1 to allow large PV to be built + m.LargePVAllowed = Var(m.PERIODS, within=Binary) # + # LargePVAllowed can only be 1 if 90% of the available rooftop PV has been built + m.Set_LargePVAllowed = Constraint( + m.PERIODS, + rule=lambda m, p: + sum(m.GenCapacity[g, p] for g in m.DIST_PV_GENS) + >= + m.LargePVAllowed[p] 
+ * 0.9 + * sum(m.gen_capacity_limit_mw[g] for g in m.DIST_PV_GENS) + ) + m.Apply_LargePVAllowed = Constraint( + m.LARGE_PV_GENS, m.PERIODS, + rule=lambda m, g, p: + m.GenCapacity[g, p] + <= + m.LargePVAllowed[p] * m.gen_capacity_limit_mw[g] + + m.gen_pre_existing_capacity[g] + ) + # Don't allow (bio)fuels to provide more than a certain percentage of the system's energy # Note: when the system really wants to use more biofuel, it is possible to "game" this limit by # cycling power through batteries, pumped storage, transmission lines or the hydrogen system to From a844668fca3115e29044690e518a2a94411893d3 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 20 Sep 2018 15:14:58 -1000 Subject: [PATCH 25/38] Place limits on down-reserves from pumped-storage hydro --- .../hawaii/register_hi_storage_reserves.py | 46 ++++++++++++------- 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/switch_model/hawaii/register_hi_storage_reserves.py b/switch_model/hawaii/register_hi_storage_reserves.py index 611d20c3a..508ff28b2 100644 --- a/switch_model/hawaii/register_hi_storage_reserves.py +++ b/switch_model/hawaii/register_hi_storage_reserves.py @@ -6,15 +6,15 @@ from pyomo.environ import * # TODO: use standard reserves module for this -# note: this is modeled off of hawaii.reserves, to avoid adding lots of +# note: this is modeled off of hawaii.reserves, to avoid adding lots of # reserve-related code to the pumped storage and (formerly) hydrogen modules. # But eventually those modules should use the standard storage module and # extend that as needed. def define_arguments(argparser): - argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], + argparser.add_argument('--hawaii-storage-reserve-types', nargs='+', default=['spinning'], help= - "Type(s) of reserves to provide from " # hydrogen and/or + "Type(s) of reserves to provide from " # hydrogen and/or "pumped-hydro storage " "(e.g., 'contingency regulation'). 
" "Default is generic 'spinning'. Specify 'none' to disable." @@ -33,24 +33,39 @@ def define_components(m): # choose how much pumped storage reserves to provide each hour, without reversing direction m.PumpedStorageSpinningUpReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals) m.Limit_PumpedStorageSpinningUpReserves_When_Charging = Constraint( - m.PH_GENS, m.TIMEPOINTS, + m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] - <= + <= m.PumpedHydroProjStoreMW[phg, tp] + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging ) m.Limit_PumpedStorageSpinningUpReserves_When_Discharging = Constraint( - m.PH_GENS, m.TIMEPOINTS, + m.PH_GENS, m.TIMEPOINTS, rule=lambda m, phg, tp: m.PumpedStorageSpinningUpReserves[phg, tp] - <= + <= m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjGenerateMW[phg, tp] + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging ) - # TODO: implement down reserves m.PumpedStorageSpinningDownReserves = Var(m.PH_GENS, m.TIMEPOINTS, within=NonNegativeReals, bounds=(0,0)) - + m.Limit_PumpedStorageSpinningDownReserves_When_Charging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningDownReserves[phg, tp] + <= + m.Pumped_Hydro_Proj_Capacity_MW[phg, m.tp_period[tp]] - m.PumpedHydroProjStoreMW[phg, tp] + + m.ph_max_capacity_mw[phg] * (1 - m.PumpedStorageCharging[phg, tp]) # relax when discharging + ) + m.Limit_PumpedStorageSpinningDownReserves_When_Discharging = Constraint( + m.PH_GENS, m.TIMEPOINTS, + rule=lambda m, phg, tp: + m.PumpedStorageSpinningDownReserves[phg, tp] + <= + m.PumpedHydroProjGenerateMW[phg, tp] + + m.ph_max_capacity_mw[phg] * m.PumpedStorageCharging[phg, tp] # relax when charging + ) + # Register with spinning reserves if hasattr(m, 'Spinning_Reserve_Up_Provisions'): # using spinning_reserves_advanced # calculate available slack from hawaii storage @@ -62,7 
+77,7 @@ def up_expr(m, a, tp): if hasattr(m, 'PumpedStorageSpinningUpReserves'): avail += sum( m.PumpedStorageSpinningUpReserves[phg, tp] - for phg in m.PH_GENS + for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail @@ -74,7 +89,7 @@ def down_expr(m, a, tp): if hasattr(m, 'PumpedStorageSpinningDownReserves'): avail += sum( m.PumpedStorageSpinningDownReserves[phg, tp] - for phg in m.PH_GENS + for phg in m.PH_GENS if m.ph_load_zone[phg] in m.ZONES_IN_BALANCING_AREA[a] ) return avail @@ -97,16 +112,16 @@ def down_expr(m, a, tp): ) # constrain reserve provision within available slack m.Limit_HawaiiStorageSpinningReserveUp = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.HawaiiStorageSpinningReserveUp[rt, ba, tp] for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES ) <= m.HawaiiStorageSlackUp[ba, tp] ) m.Limit_HawaiiStorageSpinningReserveDown = Constraint( - m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, ba, tp: + m.BALANCING_AREA_TIMEPOINTS, + rule=lambda m, ba, tp: sum( m.HawaiiStorageSpinningReserveDown[rt, ba, tp] for rt in m.HI_STORAGE_SPINNING_RESERVE_TYPES @@ -122,4 +137,3 @@ def down_expr(m, a, tp): ) m.Spinning_Reserve_Up_Provisions.append('HawaiiStorageSlackUp') m.Spinning_Reserve_Down_Provisions.append('HawaiiStorageSlackDown') - From 7dc5545d9830ca686bb5bc6b242bdc75b9eaf624 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Thu, 20 Sep 2018 15:17:42 -1000 Subject: [PATCH 26/38] Fix bug in reserve calculation for EVs that made models infeasible --- switch_model/hawaii/ev_advanced.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/switch_model/hawaii/ev_advanced.py b/switch_model/hawaii/ev_advanced.py index 3b22e8e80..1b4457c31 100644 --- a/switch_model/hawaii/ev_advanced.py +++ b/switch_model/hawaii/ev_advanced.py @@ -115,11 +115,15 @@ def rule(m): # find lowest and highest possible charging in each timepoint, used for 
reserve calcs m.ev_charge_min = Param( m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + initialize=lambda m, z, tp: + m.ev_share[z, m.tp_period[tp]] + * min(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) ) m.ev_charge_max = Param( m.LOAD_ZONES, m.TIMEPOINTS, - initialize=lambda m, z, tp: max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) + initialize=lambda m, z, tp: + m.ev_share[z, m.tp_period[tp]] + * max(m.ev_bid_mw[z, n, tp] for n in m.EV_BID_NUMS) ) # decide which share of the fleet to allocate to each charging bid From 095d25b976af2b04b0ec7e452eb0c128c4bea89c Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Wed, 20 Mar 2019 16:06:14 -1000 Subject: [PATCH 27/38] Import from pyomo.environ early to avoid masking other modules --- switch_model/hawaii/save_results.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/switch_model/hawaii/save_results.py b/switch_model/hawaii/save_results.py index 785942d64..e21d6f312 100644 --- a/switch_model/hawaii/save_results.py +++ b/switch_model/hawaii/save_results.py @@ -20,10 +20,10 @@ # added as methods (possibly by util rather than a separate reporting module). 
import os -import switch_model.hawaii.util as util -import switch_model.financials as financials from collections import defaultdict from pyomo.environ import * +import switch_model.hawaii.util as util +import switch_model.financials as financials def define_components(m): # Make sure the model has a dual suffix From ff5a2876ccb7741bfe7695ebb201aa3824dd7771 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:38:52 -1000 Subject: [PATCH 28/38] bump version number of example model data to 2.0.1 --- examples/3zone_toy/inputs/switch_inputs_version.txt | 2 +- .../3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt | 2 +- examples/carbon_cap/inputs/switch_inputs_version.txt | 2 +- examples/ccs/inputs/switch_inputs_version.txt | 2 +- examples/copperplate0/inputs/switch_inputs_version.txt | 2 +- examples/copperplate1/inputs/switch_inputs_version.txt | 2 +- examples/custom_extension/inputs/switch_inputs_version.txt | 2 +- .../discrete_and_min_build/inputs/switch_inputs_version.txt | 2 +- examples/discrete_build/inputs/switch_inputs_version.txt | 2 +- examples/dr_simple/inputs/switch_inputs_version.txt | 2 +- examples/hydro_simple/inputs/switch_inputs_version.txt | 2 +- examples/hydro_system/inputs/switch_inputs_version.txt | 2 +- examples/new_builds_only/inputs/switch_inputs_version.txt | 2 +- examples/planning_reserves/inputs/switch_inputs_version.txt | 2 +- .../1plant/inputs/switch_inputs_version.txt | 2 +- .../3plants/inputs/switch_inputs_version.txt | 2 +- .../4plants/inputs/switch_inputs_version.txt | 2 +- .../4plants_with_unserved_load/inputs/switch_inputs_version.txt | 2 +- .../discrete_unit_commit/inputs/switch_inputs_version.txt | 2 +- .../spinning_reserves/inputs/switch_inputs_version.txt | 2 +- .../spinning_reserves_advanced/inputs/switch_inputs_version.txt | 2 +- .../unit_commit/inputs/switch_inputs_version.txt | 2 +- examples/rps_simple/inputs/switch_inputs_version.txt | 2 +- examples/storage/inputs/switch_inputs_version.txt | 2 +- 24 
files changed, 24 insertions(+), 24 deletions(-) diff --git a/examples/3zone_toy/inputs/switch_inputs_version.txt b/examples/3zone_toy/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/3zone_toy/inputs/switch_inputs_version.txt +++ b/examples/3zone_toy/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt +++ b/examples/3zone_toy_stochastic_PySP/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/carbon_cap/inputs/switch_inputs_version.txt b/examples/carbon_cap/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/carbon_cap/inputs/switch_inputs_version.txt +++ b/examples/carbon_cap/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/ccs/inputs/switch_inputs_version.txt b/examples/ccs/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/ccs/inputs/switch_inputs_version.txt +++ b/examples/ccs/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/copperplate0/inputs/switch_inputs_version.txt b/examples/copperplate0/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/copperplate0/inputs/switch_inputs_version.txt +++ b/examples/copperplate0/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/copperplate1/inputs/switch_inputs_version.txt b/examples/copperplate1/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/copperplate1/inputs/switch_inputs_version.txt +++ b/examples/copperplate1/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/custom_extension/inputs/switch_inputs_version.txt 
b/examples/custom_extension/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/custom_extension/inputs/switch_inputs_version.txt +++ b/examples/custom_extension/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/discrete_and_min_build/inputs/switch_inputs_version.txt +++ b/examples/discrete_and_min_build/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/discrete_build/inputs/switch_inputs_version.txt b/examples/discrete_build/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/discrete_build/inputs/switch_inputs_version.txt +++ b/examples/discrete_build/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/dr_simple/inputs/switch_inputs_version.txt b/examples/dr_simple/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/dr_simple/inputs/switch_inputs_version.txt +++ b/examples/dr_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/hydro_simple/inputs/switch_inputs_version.txt b/examples/hydro_simple/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/hydro_simple/inputs/switch_inputs_version.txt +++ b/examples/hydro_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/hydro_system/inputs/switch_inputs_version.txt b/examples/hydro_system/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/hydro_system/inputs/switch_inputs_version.txt +++ b/examples/hydro_system/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/new_builds_only/inputs/switch_inputs_version.txt b/examples/new_builds_only/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 
--- a/examples/new_builds_only/inputs/switch_inputs_version.txt +++ b/examples/new_builds_only/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/planning_reserves/inputs/switch_inputs_version.txt b/examples/planning_reserves/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/planning_reserves/inputs/switch_inputs_version.txt +++ b/examples/planning_reserves/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/1plant/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/3plants/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/4plants/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt +++ 
b/examples/production_cost_models/4plants_with_unserved_load/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/discrete_unit_commit/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/spinning_reserves/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/spinning_reserves_advanced/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt +++ b/examples/production_cost_models/unit_commit/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/rps_simple/inputs/switch_inputs_version.txt b/examples/rps_simple/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- 
a/examples/rps_simple/inputs/switch_inputs_version.txt +++ b/examples/rps_simple/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 diff --git a/examples/storage/inputs/switch_inputs_version.txt b/examples/storage/inputs/switch_inputs_version.txt index 94789474f..38f77a65b 100644 --- a/examples/storage/inputs/switch_inputs_version.txt +++ b/examples/storage/inputs/switch_inputs_version.txt @@ -1 +1 @@ -2.0.0b4 +2.0.1 From 52c6d01a708819b47e89c7061244cf4e3c9a504f Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:43:46 -1000 Subject: [PATCH 29/38] Finish introducing balancing.demand_response.iterative and fix Pyomo 5.6 compatibility --- .../demand_response/iterative/__init__.py | 71 ++++++++++++------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/switch_model/balancing/demand_response/iterative/__init__.py b/switch_model/balancing/demand_response/iterative/__init__.py index bd23b1ace..b9aa9868f 100644 --- a/switch_model/balancing/demand_response/iterative/__init__.py +++ b/switch_model/balancing/demand_response/iterative/__init__.py @@ -22,7 +22,11 @@ import os, sys, time from pprint import pprint from pyomo.environ import * -import pyomo.repn.canonical_repn +try: + from pyomo.repn import generate_standard_repn +except ImportError: + # this was called generate_canonical_repn before Pyomo 5.6 + from pyomo.repn import generate_canonical_repn as generate_standard_repn import switch_model.utilities as utilities # TODO: move part of the reporting back into Hawaii module and eliminate these dependencies @@ -190,22 +194,35 @@ def define_components(m): ) ) # Register with spinning reserves if it is available - if 'Spinning_Reserve_Up_Provisions' in dir(m): + if hasattr(m, 'Spinning_Reserve_Up_Provisions'): m.DemandSpinningReserveUp = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: - sum(m.DemandUpReserves[z, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.DemandUpReserves[z, t] for z 
in m.ZONES_IN_BALANCING_AREA[b] + ) ) m.Spinning_Reserve_Up_Provisions.append('DemandSpinningReserveUp') m.DemandSpinningReserveDown = Expression( m.BALANCING_AREA_TIMEPOINTS, - rule=lambda m, b, t: \ - sum(m.DemandDownReserves[g, t] - for z in m.ZONES_IN_BALANCING_AREA[b]) + rule=lambda m, b, t: sum( + m.DemandDownReserves[z, t] for z in m.ZONES_IN_BALANCING_AREA[b] + ) ) m.Spinning_Reserve_Down_Provisions.append('DemandSpinningReserveDown') + if hasattr(m, 'GEN_SPINNING_RESERVE_TYPES'): + # User has spacified advanced formulation with different reserve types. + # Code needs to be added to support this if needed (see simple.py + # for an example). This is not hard, but it gets messy to support + # both simple and advanced formulations. Eventually we should just + # standardize on the advanced formulation, and then the code will be + # fairly simple. + raise NotImplementedError( + "The {} module does not yet support provision of multiple reserve types. " + "Please contact the Switch team if you need this feature." 
+ .format(__name__) + ) + # replace zone_demand_mw with FlexibleDemand in the energy balance constraint # note: the first two lines are simpler than the method I use, but my approach @@ -268,7 +285,7 @@ def define_components(m): # # start_time_tuples = [(s, 0) for s in m.FLAT_PRICING_START_TIMES] # for ts in m.TIMESERIES: - # timepoint_tuples = [(i * m.ts_duration_of_tp[ts], tp) for i, tp in enumerate(m.TS_TPS[ts])] + # timepoint_tuples = [(i * m.ts_duration_of_tp[ts], tp) for i, tp in enumerate(m.TPS_IN_TS[ts])] # # return d.pop(p, st) # @@ -355,7 +372,7 @@ def pre_iterate(m): # ) + m.DR_Welfare_Cost[tp] # ) * m.bring_timepoint_costs_to_base_year[tp] # for ts in m.TIMESERIES - # for tp in m.TS_TPS[ts] + # for tp in m.TPS_IN_TS[ts] # )) print "" @@ -378,7 +395,7 @@ def pre_iterate(m): for z in m.LOAD_ZONES for prod in m.DR_PRODUCTS ) * m.bring_timepoint_costs_to_base_year[tp] for ts in m.TIMESERIES - for tp in m.TS_TPS[ts] + for tp in m.TPS_IN_TS[ts] ) ) best_bid_benefit = value( @@ -611,11 +628,11 @@ def total_direct_costs_per_year(m, period): def electricity_marginal_cost(m, z, tp, prod): """Return marginal cost of providing product prod in load_zone z during timepoint tp.""" if prod == 'energy': - component = m.Energy_Balance[z, tp] + component = m.Zone_Energy_Balance[z, tp] elif prod == 'energy up': - component = m.Satisfy_Spinning_Reserve_Up_Requirement[tp] + component = m.Satisfy_Spinning_Reserve_Up_Requirement[m.zone_balancing_area[z], tp] elif prod == 'energy down': - component = m.Satisfy_Spinning_Reserve_Down_Requirement[tp] + component = m.Satisfy_Spinning_Reserve_Down_Requirement[m.zone_balancing_area[z], tp] else: raise ValueError('Unrecognized electricity product: {}.'.format(prod)) return m.dual[component]/m.bring_timepoint_costs_to_base_year[tp] @@ -866,8 +883,8 @@ def add_bids(m, bids): # record the level of demand for each timepoint for prod in m.DR_PRODUCTS: for i, tp in enumerate(m.TPS_IN_TS[ts]): - m.dr_bid[b, z, tp, prod] = demand[god][i] - 
m.dr_price[b, z, tp, prod] = prices[god][i] + m.dr_bid[b, z, tp, prod] = demand[prod][i] + m.dr_price[b, z, tp, prod] = prices[prod][i] print "len(m.DR_BID_LIST): {l}".format(l=len(m.DR_BID_LIST)) print "m.DR_BID_LIST: {b}".format(b=[x for x in m.DR_BID_LIST]) @@ -888,7 +905,7 @@ def add_bids(m, bids): # that no longer exist in the model). # (i.e., Energy_Balance refers to the items returned by FlexibleDemand instead of referring # to FlexibleDemand itself) - m.Energy_Balance.reconstruct() + m.Zone_Energy_Balance.reconstruct() if hasattr(m, 'SpinningReservesUpAvailable'): m.SpinningReservesUpAvailable.reconstruct() m.SpinningReservesDownAvailable.reconstruct() @@ -901,14 +918,14 @@ def add_bids(m, bids): def reconstruct_energy_balance(m): """Reconstruct Energy_Balance constraint, preserving dual values (if present).""" # copy the existing Energy_Balance object - old_Energy_Balance = dict(m.Energy_Balance) - m.Energy_Balance.reconstruct() + old_Energy_Balance = dict(m.Zone_Energy_Balance) + m.Zone_Energy_Balance.reconstruct() # TODO: now that this happens just before a solve, there may be no need to # preserve duals across the reconstruct(). if m.iteration_number > 0: for k in old_Energy_Balance: # change dual entries to match new Energy_Balance objects - m.dual[m.Energy_Balance[k]] = m.dual.pop(old_Energy_Balance[k]) + m.dual[m.Zone_Energy_Balance[k]] = m.dual.pop(old_Energy_Balance[k]) def write_batch_results(m): @@ -956,7 +973,7 @@ def summary_values(m): for p in m.PERIODS ]) - # payments by customers ([expected demand] * [gice offered for that demand]) + # payments by customers ([expected demand] * [price offered for that demand]) # note: this uses the final MC to set the final price, rather than using the # final price offered to customers. This creates consistency between the final # quantities and prices. 
Otherwise, we would use prices that differ from the @@ -1011,7 +1028,7 @@ def write_results(m): (lz, tp, prod): final_prices_by_timeseries[lz, ts][prod][i] for lz in m.LOAD_ZONES for ts in m.TIMESERIES - for i, tp in enumerate(m.TS_TPS[ts]) + for i, tp in enumerate(m.TPS_IN_TS[ts]) for prod in m.DR_PRODUCTS } final_quantities = { @@ -1021,7 +1038,7 @@ def write_results(m): )) for lz in m.LOAD_ZONES for ts in m.TIMESERIES - for tp in m.TS_TPS[ts] + for tp in m.TPS_IN_TS[ts] for prod in m.DR_PRODUCTS } @@ -1029,7 +1046,7 @@ def write_results(m): # for lz in m.LOAD_ZONES: # for ts in m.TIMESERIES: # for prod in m.DR_PRODUCTS: - # for i, tp in enumerate(m.TS_TPS[ts]): + # for i, tp in enumerate(m.TPS_IN_TS[ts]): # final_prices_by_timepoint[lz, ts, prod] = \ # final_prices[lz, ts][prod][i] @@ -1162,9 +1179,9 @@ def add_dual(const, lbound, ubound, duals, prefix='', offset=0.0): # cancel out any constants that were stored in the body instead of the bounds # (see https://groups.google.com/d/msg/pyomo-forum/-loinAh0Wx4/IIkxdfqxAQAJ) # (might be faster to do this once during model setup instead of every time) - canonical_constraint = pyomo.repn.canonical_repn.generate_canonical_repn(constr.body) - if canonical_constraint.constant is not None: - offset = -canonical_constraint.constant + standard_constraint = generate_standard_repn(constr.body) + if standard_constraint.constant is not None: + offset = -standard_constraint.constant add_dual(constr, value(constr.lower), value(constr.upper), m.dual, offset=offset) dual_data.sort(key=lambda r: (not r[0].startswith('DR_Convex_'), r[3] >= 0)+r) From a628f1e95434329745eb7d0104738d8741f9d39b Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:46:09 -1000 Subject: [PATCH 30/38] Explain uninitialized variable warnings when saving variable.tab files. 
--- switch_model/reporting/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/switch_model/reporting/__init__.py b/switch_model/reporting/__init__.py index 3d6f78ccd..df97c10b2 100644 --- a/switch_model/reporting/__init__.py +++ b/switch_model/reporting/__init__.py @@ -163,7 +163,7 @@ def get_value(obj): # diagnostic expressions sometimes have 0 denominator, # e.g., AverageFuelCosts for unused fuels; val = float("nan") - except ValueError as e: + except ValueError: # If variables are not used in constraints or the # objective function, they will never get values, and # give a ValueError at this point. @@ -171,6 +171,14 @@ def get_value(obj): # otherwise the closest bound. if getattr(obj, 'value', 0) is None: val = None + # Pyomo will print an error before it raises the ValueError, + # but we say more here to help users figure out what's going on. + print ( + "WARNING: variable {} has not been assigned a value. This " + "usually indicates a coding error: either the variable is " + "not needed or it has accidentally been omitted from all " + "constraints and the objective function.".format(obj.name) + ) else: # Caught some other ValueError raise From 874fc9fb2f579d3aa4d8474a770b6ddcf8d6483d Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:49:00 -1000 Subject: [PATCH 31/38] Don't define DispatchBaseloadByPeriod for periods when generators are inactive --- switch_model/generators/core/no_commit.py | 26 ++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/switch_model/generators/core/no_commit.py b/switch_model/generators/core/no_commit.py index 18a83fbb5..d7b6f2217 100644 --- a/switch_model/generators/core/no_commit.py +++ b/switch_model/generators/core/no_commit.py @@ -69,7 +69,25 @@ def define_components(mod): # (not necessarily running all available capacity) # Note: this is unconstrained, because other constraints limit project # dispatch during each timepoint and therefore 
the level of this variable. - mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GENS, mod.PERIODS) + # TODO: this should be harmonized with the treatment of baseload generators + # in generators.core.commit, where baseload just means they will all be + # committed all the time, but not required to run at a constant output + # level. That's the definition of baseload used in the Hawaii power system, + # while the definition used here is more like common usage (run at a flat level + # or don't run). Since there are multiple meanings, we should probably + # have separate parameters for constant_output and always_commit. Or + # we could implement those via time-varying values for min/max commitment + # and dispatch. + mod.BASELOAD_GEN_PERIODS = Set( + dimen=2, + rule=lambda m: + [(g, p) for g in m.BASELOAD_GENS for p in m.PERIODS_FOR_GEN[g]]) + mod.BASELOAD_GEN_TPS = Set( + dimen=2, + rule=lambda m: + [(g, t) for g, p in m.BASELOAD_GEN_PERIODS for t in m.TPS_IN_PERIOD[p]]) + + mod.DispatchBaseloadByPeriod = Var(mod.BASELOAD_GEN_PERIODS) def DispatchUpperLimit_expr(m, g, t): if g in m.VARIABLE_GENS: @@ -82,11 +100,9 @@ def DispatchUpperLimit_expr(m, g, t): rule=DispatchUpperLimit_expr) mod.Enforce_Dispatch_Baseload_Flat = Constraint( - mod.GEN_TPS, + mod.BASELOAD_GEN_TPS, rule=lambda m, g, t: - (m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) - if g in m.BASELOAD_GENS - else Constraint.Skip) + m.DispatchGen[g, t] == m.DispatchBaseloadByPeriod[g, m.tp_period[t]]) mod.Enforce_Dispatch_Upper_Limit = Constraint( mod.GEN_TPS, From 2eeec8e061524602adc2eb2929d91de73f56b8f3 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:51:21 -1000 Subject: [PATCH 32/38] Move --retrieve-cplex-mip-duals capability from hawaii.switch_patch to core model. 
--- switch_model/hawaii/switch_patch.py | 20 ---------- switch_model/solve.py | 61 +++++++++++++++++++++++------ 2 files changed, 49 insertions(+), 32 deletions(-) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index 1ec784c5a..cbc2a4346 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,26 +1,6 @@ from pyomo.environ import * import switch_model.utilities as utilities -# patch Pyomo's solver to retrieve duals and reduced costs for MIPs from cplex lp solver -# (This could be made permanent in pyomo.solvers.plugins.solvers.CPLEX.create_command_line) -def new_create_command_line(*args, **kwargs): - # call original command - command = old_create_command_line(*args, **kwargs) - # alter script - if hasattr(command, 'script') and 'optimize\n' in command.script: - command.script = command.script.replace( - 'optimize\n', - 'optimize\nchange problem fix\noptimize\n' - # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 - # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 - ) - print "changed CPLEX solve script to the following:" - print command.script - return command -from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL -old_create_command_line = CPLEXSHELL.create_command_line -CPLEXSHELL.create_command_line = new_create_command_line - # Code below is in switch_model.solve now # # micro-patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution # # to use a cache for component names; otherwise reloading a solution diff --git a/switch_model/solve.py b/switch_model/solve.py index aa59b4b79..68ec410db 100755 --- a/switch_model/solve.py +++ b/switch_model/solve.py @@ -456,6 +456,13 @@ def define_arguments(argparser): argparser.add_argument("--tempdir", default=None, help='The name of a directory to hold temporary files produced by the solver. 
' 'This is usually paired with --keepfiles and --symbolic-solver-labels.') + argparser.add_argument( + '--retrieve-cplex-mip-duals', default=False, action='store_true', + help=( + "Patch Pyomo's solver script for cplex to re-solve and retrieve " + "dual values for mixed-integer programs." + ) + ) # NOTE: the following could potentially be made into standard arguments for all models, # e.g. by defining them in a define_standard_arguments() function in switch.utilities.py @@ -659,18 +666,15 @@ def solve(model): # import pdb; pdb.set_trace() model.solver_manager = SolverManagerFactory(model.options.solver_manager) - # get solver arguments (if any) - if hasattr(model, "options"): - solver_args = dict( - options_string=model.options.solver_options_string, - keepfiles=model.options.keepfiles, - tee=model.options.tee, - symbolic_solver_labels=model.options.symbolic_solver_labels - ) - # drop all the unspecified options - solver_args = {k: v for (k, v) in solver_args.items() if v is not None} - else: - solver_args={} + # get solver arguments + solver_args = dict( + options_string=model.options.solver_options_string, + keepfiles=model.options.keepfiles, + tee=model.options.tee, + symbolic_solver_labels=model.options.symbolic_solver_labels + ) + # drop all the unspecified options + solver_args = {k: v for (k, v) in solver_args.items() if v is not None} # Automatically send all defined suffixes to the solver solver_args["suffixes"] = [ @@ -688,6 +692,10 @@ def solve(model): if not hasattr(model.solver, "_options_string_to_dict"): solver_args.pop("options_string", "") + # patch Pyomo to retrieve MIP duals from cplex if needed + if model.options.retrieve_cplex_mip_duals: + retrieve_cplex_mip_duals() + # solve the model if model.options.verbose: timer = StepTimer() @@ -758,6 +766,35 @@ def solve(model): model.last_results = results return results +def retrieve_cplex_mip_duals(): + """patch Pyomo's solver to retrieve duals and reduced costs for MIPs + from cplex lp solver. 
(This could be made permanent in + pyomo.solvers.plugins.solvers.CPLEX.create_command_line).""" + from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL + old_create_command_line = CPLEXSHELL.create_command_line + def new_create_command_line(*args, **kwargs): + # call original command + command = old_create_command_line(*args, **kwargs) + # alter script + if hasattr(command, 'script') and 'optimize\n' in command.script: + command.script = command.script.replace( + 'optimize\n', + 'optimize\nchange problem fix\noptimize\n' + # see http://www-01.ibm.com/support/docview.wss?uid=swg21399941 + # and http://www-01.ibm.com/support/docview.wss?uid=swg21400009 + ) + print "changed CPLEX solve script to the following:" + print command.script + else: + print ( + "Unable to patch CPLEX solver script to retrieve duals " + "for MIP problems" + ) + return command + new_create_command_line.is_patched = True + if not getattr(CPLEXSHELL.create_command_line, 'is_patched', False): + CPLEXSHELL.create_command_line = new_create_command_line + # taken from https://software.sandia.gov/trac/pyomo/browser/pyomo/trunk/pyomo/opt/base/solvers.py?rev=10784 # This can be removed when all users are on Pyomo 4.2 From e861893f5b108cf0111f5364ca2e869f01395f0e Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:53:49 -1000 Subject: [PATCH 33/38] Upgrade data/modules to 2.0.1 and report changes in modules between versions. --- switch_model/upgrade/manager.py | 36 ++++--- switch_model/upgrade/upgrade_2_0_1.py | 146 ++++++++++++++++++++++++++ 2 files changed, 166 insertions(+), 16 deletions(-) create mode 100644 switch_model/upgrade/upgrade_2_0_1.py diff --git a/switch_model/upgrade/manager.py b/switch_model/upgrade/manager.py index 8af391d31..2046d32bd 100644 --- a/switch_model/upgrade/manager.py +++ b/switch_model/upgrade/manager.py @@ -13,24 +13,28 @@ import upgrade_2_0_0b1 import upgrade_2_0_0b2 import upgrade_2_0_0b4 +import upgrade_2_0_1 -# Available upgrade code. 
This needs to be in consecutive order so +# Available upgrade code. This needs to be in consecutive order so # upgrade_inputs can incrementally apply the upgrades. upgrade_plugins = [ - (upgrade_2_0_0b1, - upgrade_2_0_0b1.upgrades_from, + (upgrade_2_0_0b1, + upgrade_2_0_0b1.upgrades_from, upgrade_2_0_0b1.upgrades_to), - (upgrade_2_0_0b2, + (upgrade_2_0_0b2, upgrade_2_0_0b2.upgrades_from, upgrade_2_0_0b2.upgrades_to), - (upgrade_2_0_0b4, + (upgrade_2_0_0b4, upgrade_2_0_0b4.upgrades_from, upgrade_2_0_0b4.upgrades_to), + (upgrade_2_0_1, + upgrade_2_0_1.upgrades_from, + upgrade_2_0_1.upgrades_to), ] - + # Not every code revision requires an update, so we hard-code the last # revision that required an update. -last_required_update = '2.0.0b4' +last_required_update = '2.0.1' code_version = StrictVersion(switch_model.__version__) version_file = 'switch_inputs_version.txt' @@ -50,7 +54,7 @@ def get_input_version(inputs_dir): """ Scan the inputs directory and take a best-guess at version number. In the simple case, this will be in the stored in switch_inputs_version.txt - Args: + Args: inputs_dir (str) path to inputs folder Returns: version (str) of inputs folder @@ -82,7 +86,7 @@ def _write_input_version(inputs_dir, new_version): def do_inputs_need_upgrade(inputs_dir): """ Determine if input directory can be upgraded with this script. - Args: + Args: inputs_dir (str) path to inputs folder Returns: (boolean) @@ -119,9 +123,9 @@ def upgrade_inputs(inputs_dir, backup=True): # Successively apply the upgrade scripts as needed. 
for (upgrader, v_from, v_to) in upgrade_plugins: inputs_v = StrictVersion(get_input_version(inputs_dir)) - # note: the next line catches datasets created by/for versions of Switch that + # note: the next line catches datasets created by/for versions of Switch that # didn't require input directory upgrades - if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): + if StrictVersion(v_from) <= inputs_v < StrictVersion(v_to): print_verbose('\tUpgrading from ' + v_from + ' to ' + v_to) upgrader.upgrade_input_dir(inputs_dir) print_verbose('\tFinished upgrading ' + inputs_dir + '\n') @@ -142,7 +146,7 @@ def main(args=None): else: if not os.path.isdir(args.inputs_dir_name): print("Error: Input directory {} does not exist.".format(args.inputs_dir_name)) - return -1 + return -1 upgrade_inputs(os.path.normpath(args.inputs_dir_name), args.backup) def set_verbose(verbosity): @@ -150,13 +154,13 @@ def set_verbose(verbosity): verbose = verbosity def add_parser_args(parser): - parser.add_argument("--inputs-dir-name", type=str, default="inputs", + parser.add_argument("--inputs-dir-name", type=str, default="inputs", help='Input directory name (default is "inputs")') - parser.add_argument("--backup", action='store_true', default=True, + parser.add_argument("--backup", action='store_true', default=True, help='Make backup of inputs directory before upgrading (set true by default)') - parser.add_argument("--no-backup", action='store_false', dest='backup', + parser.add_argument("--no-backup", action='store_false', dest='backup', help='Do not make backup of inputs directory before upgrading') - parser.add_argument("--recursive", dest="recursive", + parser.add_argument("--recursive", dest="recursive", default=False, action='store_true', help=('Recursively scan the provided path for inputs directories ' 'named "inputs", and upgrade each directory found. 
Note, this ' diff --git a/switch_model/upgrade/upgrade_2_0_1.py b/switch_model/upgrade/upgrade_2_0_1.py new file mode 100644 index 000000000..16217200b --- /dev/null +++ b/switch_model/upgrade/upgrade_2_0_1.py @@ -0,0 +1,146 @@ +# Copyright (c) 2015-2017 The Switch Authors. All rights reserved. +# Licensed under the Apache License, Version 2.0, which is in the LICENSE file. + +""" +Upgrade input directories from 2.0.0b4 (final beta) to 2.0.1. (There were no changes for 2.0.0.) +This just moves some modules, as listed in the rename_modules variable. +""" + +import os, shutil, argparse +import pandas +import switch_model.upgrade + +upgrades_from = '2.0.0b4' +upgrades_to = '2.0.1' + +# note: we could keep switch_model.hawaii.reserves active, but then we would need special code to switch +# the model to the main reserves module if and only if they are using the iterative demand response system +# which seems unnecessarily complicated +replace_modules = { + 'switch_model.hawaii.demand_response': + ['switch_model.balancing.demand_response.iterative'], + 'switch_model.hawaii.r_demand_system': + ['switch_model.balancing.demand_response.iterative.r_demand_system'], + 'switch_model.hawaii.reserves': [ + 'switch_model.balancing.operating_reserves.areas', + 'switch_model.balancing.operating_reserves.spinning_reserves', + ] +} +module_messages = { + # description of significant changes to particular modules (other than moving) + # old_module: message + 'switch_model.hawaii.r_demand_system': + 'The switch_model.hawaii.r_demand_system module has been moved. Please update ' + 'the --dr-demand-module flag to point to the new location.', + 'switch_model.hawaii.demand_response': + 'The switch_model.hawaii.demand_response module has been moved. Please update ' + 'iterate.txt to refer to the new location.', + 'switch_model.hawaii.switch_patch': + 'The switch_model.hawaii.switch_patch module no longer patches ' + 'the cplex solver to generate dual values for mixed-integer programs. 
' + 'Use the new --retrieve-cplex-mip-duals flag if you need this behavior.' +} + +def upgrade_input_dir(inputs_dir): + """ + Upgrade the input directory. + """ + # rename modules and report changes + update_modules(inputs_dir) + + # Write a new version text file. + switch_model.upgrade._write_input_version(inputs_dir, upgrades_to) + + +def rename_file(old_name, new_name, optional_file=True): + old_path = os.path.join(inputs_dir, old_name) + new_path = os.path.join(inputs_dir, new_name) + if optional_file and not os.path.isfile(old_path): + return + shutil.move(old_path, new_path) + +def rename_column(file_name, old_col_name, new_col_name, optional_file=True): + path = os.path.join(inputs_dir, file_name) + if optional_file and not os.path.isfile(path): + return + df = pandas.read_csv(path, na_values=['.'], sep='\t') + df.rename(columns={old_col_name: new_col_name}, inplace=True) + df.to_csv(path, sep='\t', na_rep='.', index=False) + +def item_list(items): + """Generate normal-text version of list of items, with commas and "and" as needed.""" + return ' and '.join(', '.join(items).rsplit(', ', 1)) + +def update_modules(inputs_dir): + """Rename modules in the module list if needed (list is sought in + standard locations) and return list of alerts for user.""" + + modules_path = os.path.join(inputs_dir, 'modules.txt') + if not os.path.isfile(modules_path): + modules_path = os.path.join(inputs_dir, '..', 'modules.txt') + if not os.path.isfile(modules_path): + raise RuntimeError( + "Unable to find modules or modules.txt file for input directory '{}'. " + "This file should be located in the input directory or its parent." 
+ .format(inputs_dir) + ) + modules_path = os.path.normpath(modules_path) # tidy up for display later + + # Upgrade module listings + # Each line of the original file is either a module identifier or a comment + with open(modules_path) as f: + old_module_list = [line.strip() for line in f.read().splitlines()] + + # rename modules as needed + new_module_list=[] + for module in old_module_list: + try: + new_modules = replace_modules[module] + print ( + "Module {old} has been replaced by {new} in {file}." + .format(old=module, new=item_list(new_modules), file=modules_path) + ) + except KeyError: + new_modules = [module] + new_module_list.extend(new_modules) + + # load reserve balancing areas module early, to support modules that + # define reserve products. + + # switch_model.hawaii.reserves loaded late and then found reserve + # components defined by other modules, but + # switch_model.balancing.operating_reserves.spinning_reserves should + # load early so other modules can register reserves with it. + if 'switch_model.hawaii.reserves' in old_module_list: + new_spin = 'switch_model.balancing.operating_reserves.areas' + try: + insert_pos = new_module_list.index('switch_model.balancing.load_zones') + 1 + if insert_pos < new_module_list.index(new_spin): + new_module_list.remove(new_spin) + new_module_list.insert(insert_pos, new_spin) + # print ( + # '{} has been moved up to row {} in {}, ' + # 'to allow other modules to register reserves with it.' + # .format(new_spin, insert_pos + 1, modules_path) + # ) + except ValueError: + # couldn't find the location to insert spinning reserves module + print ( + '{} module should be moved early in the module list, ' + 'before any modules that define reserve elements.' 
+ .format(new_spin) + ) + + #import pdb; pdb.set_trace() + + # write new modules list + with open(modules_path, 'w') as f: + for module in new_module_list: + f.write(module + "\n") + + # report any significant changes in the previously active modules + for module in old_module_list: + try: + print "ATTENTION: {}".format(module_messages[module]) + except KeyError: + pass From c744e0d2feb72d29c86dc7a7a10f038325f52249 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Fri, 5 Apr 2019 20:55:13 -1000 Subject: [PATCH 34/38] Replace switch_mod more selectively to avoid creating "switch_modelel" if the upgrade runs twice. --- switch_model/upgrade/upgrade_2_0_0b2.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/switch_model/upgrade/upgrade_2_0_0b2.py b/switch_model/upgrade/upgrade_2_0_0b2.py index 633cf0cc7..90cd2feba 100644 --- a/switch_model/upgrade/upgrade_2_0_0b2.py +++ b/switch_model/upgrade/upgrade_2_0_0b2.py @@ -16,11 +16,11 @@ def upgrade_input_dir(inputs_dir): """ - Upgrade an input directory to rename the main package from 'switch_mod' + Upgrade an input directory to rename the main package from 'switch_mod' to 'switch_model' in the modules.txt file. """ - # Find modules.txt; it should be either in the inputs directory or in its - # parent directory. + # Find modules.txt; it should be either in the inputs directory or in its + # parent directory. modules_path = os.path.join(inputs_dir, 'modules.txt') if not os.path.isfile(modules_path): modules_path = os.path.join(inputs_dir, '..', 'modules.txt') @@ -32,10 +32,19 @@ def upgrade_input_dir(inputs_dir): ) # Replace switch_mod with switch_model in modules.txt + # Note: the original version of this script blindly replaced + # "switch_mod" with "switch_model", which clobbered module names + # already updated more carefully by upgrade_2_0_0b1.py. + # This was updated 2019-04-01 to do this better. 
+ # TODO: this script is redundant with upgrade_2_0_0b1.py + # and could be eliminated. with open(modules_path) as f: module_list = [line.strip() for line in f.read().splitlines()] - final_module_list = [line.replace('switch_mod', 'switch_model') - for line in module_list] + final_module_list = [ + 'switch_model' + line[10:] if line.startswith('switch_mod.') or line == 'switch_mod' + else line + for line in module_list + ] with open(modules_path, 'w') as f: for module in final_module_list: From 90f188cdf0276c0a4ba247a3090b7a6264ae6ce5 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sat, 6 Apr 2019 16:23:17 -1000 Subject: [PATCH 35/38] Move CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD back to hawaii.switch_patch This set is useful for assets with lifetimes longer than the study, but that assumption is generally poor practice. It is better to use explicit lifetimes. So this component is not widely useful. --- switch_model/hawaii/switch_patch.py | 176 ++-------------------------- switch_model/timescales.py | 8 -- 2 files changed, 12 insertions(+), 172 deletions(-) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index cbc2a4346..f4faa9e60 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -1,155 +1,18 @@ from pyomo.environ import * -import switch_model.utilities as utilities - -# Code below is in switch_model.solve now -# # micro-patch pyomo.core.base.PyomoModel.ModelSolutions.add_solution -# # to use a cache for component names; otherwise reloading a solution -# # takes longer than solving the model from scratch. -# # TODO: create a pull request for Pyomo to do this -# import inspect, textwrap, types -# -# def replace_method(class_ref, method_name, new_source_code): -# """ -# Replace specified class method with a compiled version of new_source_code. 
-# """ -# orig_method = getattr(class_ref, method_name) -# # compile code into a function -# workspace = dict() -# exec(textwrap.dedent(new_source_code), workspace) -# new_method = workspace[method_name] -# # create a new function with the same body, but using the old method's namespace -# new_func = types.FunctionType( -# new_method.__code__, -# orig_method.__globals__, -# orig_method.__name__, -# orig_method.__defaults__, -# orig_method.__closure__ -# ) -# # note: this normal function will be automatically converted to an unbound -# # method when it is assigned as an attribute of a class -# setattr(class_ref, method_name, new_func) -# -# old_code = """ -# for obj in instance.component_data_objects(Var): -# cache[obj.name] = obj -# for obj in instance.component_data_objects(Objective, active=True): -# cache[obj.name] = obj -# for obj in instance.component_data_objects(Constraint, active=True): -# cache[obj.name] = obj -# """ -# new_code = """ -# # use buffer to avoid full search of component for data object -# # which introduces a delay that is quadratic in model size -# buf=dict() -# for obj in instance.component_data_objects(Var): -# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj -# for obj in instance.component_data_objects(Objective, active=True): -# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj -# for obj in instance.component_data_objects(Constraint, active=True): -# cache[obj.getname(fully_qualified=True, name_buffer=buf)] = obj -# """ -# -# from pyomo.core.base.PyomoModel import ModelSolutions -# add_solution_code = inspect.getsource(ModelSolutions.add_solution) -# if old_code in add_solution_code: -# # create and inject a new version of the method code -# add_solution_code = add_solution_code.replace(old_code, new_code) -# replace_method(ModelSolutions, 'add_solution', add_solution_code) -# else: -# print( -# "NOTE: The patch to pyomo.core.base.PyomoModel.ModelSolutions.add_solution " -# "has been deactivated because the 
Pyomo source code has changed. " -# "Check whether this patch is still needed and edit {} accordingly." -# .format(__file__) -# ) -# -# x = 7 -# def prx(arg=81): -# print x, arg -# prx() -# # both of these fail, readonly attribute -# # prx.__globals__ = {'prx': prx, 'x': 99} -# # prx.func_globals = {'prx': prx, 'x': 99} -# -# f = prx -# prx2 = types.FunctionType(f.__code__, {'prx': prx, 'x': 99}, f.__name__, f.__defaults__, f.__closure__) -# prx2(22) -# -# f = ModelSolutions.add_solution -# new_f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__, f.__closure__) -# -# type(prx) -# -# def func(test='no argument'): -# print test -# func(3) -# -# func.func_globals -# -# new_func = types.FunctionType( -# func.func_code, -# func.func_globals, -# 'new_func' -# ) -# new_func() -# -# from pyomo.environ import * -# ms = ModelSolutions(AbstractModel()) -# ms.add_solution(1, 2, 3, 4, 5, 6, 7) -# -# from pyomo.environ import * -# from pyomo.core.base.PyomoModel import ModelSolutions -# new_code = """ -# def add_symbol_map(self, symbol_map=None): -# print logging -# """ -# replace_method(ModelSolutions, 'add_symbol_map', new_code) -# ms = ModelSolutions(AbstractModel()) -# ms.add_symbol_map() -# -# ms.add_solution.func_code.co_names -# -# new_bytecode = types.CodeType( -# fc.co_argcount, -# fc.co_nlocals, -# fc.co_stacksize, -# fc.co_flags, -# new_method.func_code.co_code, -# fc.co_consts, -# fc.co_names, -# fc.co_varnames, -# fc.co_filename, -# fc.co_name, -# fc.co_firstlineno, -# fc.co_lnotab -# ) -# -# -# ModelSolutions.add_symbol_map -# -# import types -# class C(object): -# def __init__(self): -# self.val = 3 -# -# def get_val(self): -# return self.val -# -# C.get_val = types.MethodType(get_val, None, C) -# C.get_val -# C.__init__ -# o = C() -# o.get_val() -# -# o.get_val = types.MethodType(get_val, o) -# o.get_val() +def define_components(m): + """Make various changes to the model to support hawaii-specific modules.""" -# (class_ref, name, 
new_source_code) = (ModelSolutions, 'add_solution', add_solution_code) -# -# space = dict() -# exec("class Dummy(object):\n" + add_solution_code, space) -# type(space['Dummy'].add_solution) + # define an indexed set of all periods before or including the current one. + # this is useful for calculations that must index over previous and current periods + # e.g., amount of capacity of some resource that has been built + # This isn't very good form (generally every asset should have a lifetime and we + # should refer to that), so it's not included in the core model. + mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( + mod.PERIODS, ordered=True, + initialize=lambda m, p: + [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] + ) # # TODO: combine the following changes into a pull request for Pyomo # # patch Pyomo's table-reading function to allow .tab files with headers but no data @@ -202,18 +65,3 @@ # print "Unable to patch current version of pyomo.core.data.process_data:" # print '{}({})'.format(type(e).__name__, ','.join(repr(a) for a in e.args)) # print "Switch will not be able to read empty data files." - - -# def define_components(m): -# """Make various changes to the model to facilitate reporting and avoid unwanted behavior""" -# -# # define an indexed set of all periods before or including the current one. -# # this is useful for calculations that must index over previous and current periods -# # e.g., amount of capacity of some resource that has been built -# m.CURRENT_AND_PRIOR_PERIODS = Set(m.PERIODS, ordered=True, initialize=lambda m, p: -# # note: this is a fast way to refer to all previous periods, which also respects -# # the built-in ordering of the set, but you have to be careful because -# # (a) pyomo sets are indexed from 1, not 0, and -# # (b) python's range() function is not inclusive on the top end. 
-# [m.PERIODS[i] for i in range(1, m.PERIODS.ord(p)+1)] -# ) diff --git a/switch_model/timescales.py b/switch_model/timescales.py index d0af25db0..cb863b82f 100644 --- a/switch_model/timescales.py +++ b/switch_model/timescales.py @@ -355,14 +355,6 @@ def validate_period_lengths_rule(m, p): mod.PERIODS, rule=validate_period_lengths_rule) - # define an indexed set of all periods before or including the current one. - # this is useful for calculations that must index over previous and current periods - mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( - mod.PERIODS, ordered=True, - initialize=lambda m, p: - [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] - ) - def load_inputs(mod, switch_data, inputs_dir): """ From e1c38cd671f2dab4376ba47dfcc6f715ea2f8ecd Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Sat, 6 Apr 2019 16:28:22 -1000 Subject: [PATCH 36/38] Bug fix in hawaii.switch_patch --- switch_model/hawaii/switch_patch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/switch_model/hawaii/switch_patch.py b/switch_model/hawaii/switch_patch.py index f4faa9e60..5658f7fb0 100644 --- a/switch_model/hawaii/switch_patch.py +++ b/switch_model/hawaii/switch_patch.py @@ -8,8 +8,8 @@ def define_components(m): # e.g., amount of capacity of some resource that has been built # This isn't very good form (generally every asset should have a lifetime and we # should refer to that), so it's not included in the core model. 
- mod.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( - mod.PERIODS, ordered=True, + m.CURRENT_AND_PRIOR_PERIODS_FOR_PERIOD = Set( + m.PERIODS, ordered=True, initialize=lambda m, p: [p2 for p2 in m.PERIODS if m.PERIODS.ord(p2) <= m.PERIODS.ord(p)] ) From df100ae817de1671bbabeb5c497eedb00ff3bda0 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 8 Apr 2019 10:20:20 -1000 Subject: [PATCH 37/38] Add CHANGELOG.txt --- CHANGELOG.txt | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 CHANGELOG.txt diff --git a/CHANGELOG.txt b/CHANGELOG.txt new file mode 100644 index 000000000..c49fb6dd4 --- /dev/null +++ b/CHANGELOG.txt @@ -0,0 +1,111 @@ +------------------------------------------------------------------------------- +Switch 2.0.1 +------------------------------------------------------------------------------- + +- General + - Switch is now compatible with Pyomo 5.6+ (in addition to earlier + versions). + - A new --no-post-solve option prevents all post-solve actions (e.g., saving + variable results). + - If the user specifies --reload-prior-solution, Switch now behaves as if it + had just solved the model, i.e., after loading the solution, it runs post- + solve code unless --no-post-solve is specified (useful for re-running + reporting code), and it only drops to an interactive Python prompt if the + user also specifies --interact. + - A new --no-save-solution disables automatic solution-saving. This saves + time and disk space for models that won't need to be reloaded later. + - New --quiet and --no-stream-solver arguments cancel --verbose and + --stream-solver. + - A new "--save-expression[s] ..." argument can be used to + save values for any Pyomo Expression object to a .tab file after the model + is solved (similar to the automatic saving of variable values). This also + works for Param objects. 
+ - The --include-module(s), --exclude-module(s), --save-expression(s), + --suffix(es) and --scenario(s) flags can now be used repeatedly on the + command line, in options.txt or in scenarios.txt. The include and exclude + options will be applied in the order they are encountered, in options.txt, + then scenarios.txt, then the command line. + - A new --retrieve-cplex-mip-duals flag can be used to support retrieving + duals for a MIP program from the cplex solver (users must also turn on the + "duals" suffix). This flag patches the Pyomo-generated cplex command + script to pass the "change problem fix" command to the solver and then + solve a second time. This fixes integer variables at their final values, + then re-solves to obtain duals. This flag is not needed with the cplexamp + solver. + - A new balancing.demand_response.iterative module has been added. This was + formerly in the Hawaii regional package. This module performs iterative solutions with any convex demand system, based on a bid-response + process. + - New indexed sets have been added to allow efficient selection of groups of + generators that use a particular technology, fuel or non-fuel energy + source: GENS_BY_TECHNOLOGY, GENS_BY_FUEL, GENS_BY_NON_FUEL_ENERGY_SOURCE. + - Generator capacity data is saved to gen_cap.tab instead of gen_cap.txt and + rows are sorted if user specifies --sorted-output. + - Even if a model has solver warnings, results will be reported and + post-solve will be performed if a valid solution is available. + - A more descriptive warning is given when switch_model.reporting finds an + uninitialized variable. + - A warning is given about potential errors parsing arguments in the form + "--flag=value". Python's argument parsing module can make mistakes with + these, so "--flag value" is a safer choice. + - Switch now monkeypatches Pyomo to accelerate reloading prior solutions.
+ Previously Pyomo 5.1.1 (and maybe others) took longer to load prior + solutions than solving the model. + - At startup, "switch solve-scenarios" will restart jobs that were + previously interrupted after being started by the same worker (same + --job-id argument or SWITCH_JOB_ID environment variable). Note that + workers automatically pull scenarios from the scenario list file until + there are none left to solve, and avoid solving scenarios that have been + pulled by other workers. Each worker should be given a unique job ID, and + this ID should be reused if the worker is terminated and restarted. The + new behavior ensures that jobs are not abandoned if a worker is restarted. + +- Upgrade scripts + - The upgrade scripts now report changes in module behavior or renamed + modules while upgrading an inputs directory. This only reports changes to + modules used in the current model. + - The hawaii.reserves module is automatically replaced by + balancing.operating_reserves.areas and + balancing.operating_reserves.spinning_reserves in the module list. + - The hawaii.demand_response module is replaced by + balancing.demand_response.iterative and hawaii.r_demand_system is replaced + by balancing.demand_response.iterative.r_demand_system in the module list. + - "switch_mod" will not be changed to "switch_modelel" if a module file is + upgraded from 2.0.0b1 to 2.0.0b2 twice. + +- Hawaii regional package + - The hawaii.reserves module has been deprecated and the + hawaii.demand_response module has been moved (see upgrade scripts) + - Switch now places limits on down-reserves from pumped-storage hydro. + - A new --rps-prefer-dist-pv option for hawaii.rps will prevent construction + of new large PV until 90% of distributed PV potential has been developed. + - Limits on load-shifting between hours in hawaii.demand_response_simple + have been formalized. + - The Hawaii storage energy cost calculation has been fixed. 
+ - Total production by energy source is reported by hawaii.save_results, + ad-hoc technologies are added to production_by_technology.tsv, and + hourly dispatch is disaggregated by non-fuel technologies. + - Bugs have been fixed in reserve calculation for EVs and in + hawaii.smooth_dispatch and hawaii.scenario_data. + - hawaii.smooth_dispatch minimizes total inter-hour change instead of square + of levels. The old quadratic smoothing method has been moved to + hawaii.smooth_dispatch.quadratic (slow and possibly buggy). + - The must-run requirement in hawaii.kalaeloa is turned off when RPS or EV + share is above 75% (can be overridden by --run-kalaeloa-even-with-high-rps) + - Support for nominal-dollar fuel price forecasts has been dropped from + hawaii.scenario_data + - A new --no-hydrogen flag can be used to deactivate the hydrogen module. + - The hawaii.ev_advanced module now calculates vehicle fleet emissions. + +------------------------------------------------------------------------------- +Switch 2.0.0 +------------------------------------------------------------------------------- + +First public release of Switch 2. This uses a similar framework to Switch 1, +but with numerous improvements. The most significant are: + +- Written in Python instead of AMPL language +- Modular approach, so components can be easily added or removed from model +- Modeling of unit commitment and part load heat rates (optional) +- Generalization of sample timeseries to have arbitrary length instead of single + days +- Standardized reporting, e.g., automatic export of all variable values From 823672ed7cae9d769d71a7dbce4ba0a04a9e2398 Mon Sep 17 00:00:00 2001 From: Matthias Fripp Date: Mon, 8 Apr 2019 10:28:41 -1000 Subject: [PATCH 38/38] Change setup.py status from Beta to Production/Stable and note Linux support correctly. 
--- setup.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 55dc0d1d3..c5e6950b7 100644 --- a/setup.py +++ b/setup.py @@ -1,12 +1,12 @@ -"""Setup script for SWITCH. +"""Setup script for SWITCH. Use "pip install --upgrade ." to install a copy in the site packages directory. -Use "pip install --upgrade --editable ." to install SWITCH to be run from its +Use "pip install --upgrade --editable ." to install SWITCH to be run from its current location. -Optional dependencies can be added during the initial install or later by -running a command like this: +Optional dependencies can be added during the initial install or later by +running a command like this: pip install --upgrade --editable .[advanced,database_access] Use "pip uninstall switch" to uninstall switch from your system. @@ -36,7 +36,7 @@ def read(*rnames): description='SWITCH Power System Planning Model', long_description=read('README'), classifiers=[ - 'Development Status :: 4 - Beta', + 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Education', 'Intended Audience :: End Users/Desktop', @@ -45,16 +45,16 @@ def read(*rnames): 'Natural Language :: English', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', + 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Programming Language :: Python', - 'Programming Language :: Unix Shell', 'Topic :: Scientific/Engineering', 'Topic :: Software Development :: Libraries :: Python Modules' ], packages=find_packages(include=['switch_model', 'switch_model.*']), keywords=[ - 'renewable', 'power', 'energy', 'electricity', - 'production cost', 'capacity expansion', + 'renewable', 'power', 'energy', 'electricity', + 'production cost', 'capacity expansion', 'planning', 'optimization' ], install_requires=[