Merge pull request #116 from Breakthrough-Energy/bainan/extract_duals

feat: add functions to extract duals in SwitchExtract class

BainanXia authored Jul 9, 2021
2 parents 229133d + 72116f6, commit 6f18ade

Showing 2 changed files with 102 additions and 16 deletions.
switchwrapper/helpers.py: 10 changes (6 additions & 4 deletions)
@@ -10,14 +10,15 @@ def match_variables(variables, pattern, columns, value_name="capacity"):
         with key "Value" and float value.
     :param str pattern: regex pattern to use to search for matching variables.
     :param iterable columns: names to extract from match to data frame columns.
+    :param str value_name: define the column name of values, defaults to "capacity".
     :return: (*pandas.DataFrame*) -- data frame of matching variables.
     """
     prog = re.compile(pattern)
     df = pd.DataFrame(
         [
             {
                 **{name: m.group(name) for name in columns},
-                value_name: variables[m.group(0)]["Value"],
+                value_name: list(variables[m.group(0)].values())[0],
             }
             for m in [
                 prog.match(v) for v in variables.keys() if prog.match(v) is not None
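For orientation, a hedged sketch of how `match_variables` consumes such a dictionary. The variable keys, the pattern, and the `plant_id`/`timepoint` group names are invented for illustration; the point of the change is that the inner dict is now read generically, so it works whether its single key is "Value" or "Dual":

```python
import re

import pandas as pd

# Invented solver output: keys encode the variable name and its indexes.
variables = {
    "DispatchGen[10,1]": {"Value": 4.2},
    "DispatchGen[10,2]": {"Value": 3.9},
}
pattern = r"DispatchGen\[(?P<plant_id>\d+),(?P<timepoint>\d+)\]"
columns = ["plant_id", "timepoint"]

prog = re.compile(pattern)
df = pd.DataFrame(
    [
        {
            **{name: m.group(name) for name in columns},
            # list(...)[0] reads the lone value, keyed "Value" or "Dual"
            "capacity": list(variables[m.group(0)].values())[0],
        }
        for m in map(prog.match, variables)
        if m is not None
    ]
)
print(df)  # columns: plant_id, timepoint, capacity
```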
@@ -72,9 +73,10 @@ def parse_timepoints(var_dict, variables, timestamps_to_timepoints, value_name):
         a timestamp-indexed dataframe.
     :param dict var_dict: a flat dictionary where the keys are a string
-        containing both variable names and variable parameters and the values
-        are a dictionary where Value is the datapoint for that combination of
-        variable name and parameters.
+        containing both variable names and variable indexes and the values
+        are a dictionary. This dictionary has a single key ("Value" for primal
+        variables, or "Dual" for dual variables) and the value is the data point for
+        that combination of variable name and indexes.
     :param list variables: a list of timeseries variable strings to parse out
     :param pandas.DataFrame timestamps_to_timepoints: data frame indexed by
         timestamps with a column of timepoints for each timestamp.
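To make the revised docstring concrete, a minimal illustration of the two value shapes (names and numbers invented):

```python
# One flat dict; each inner dict has exactly one key, so the same parsing
# path serves primal variables and constraint duals alike.
var_dict = {
    "DispatchGen[10,1]": {"Value": 4.2},           # primal: variable name + indexes
    "Zone_Energy_Balance[201,1]": {"Dual": 25.7},  # dual: constraint name + indexes
}
for key, record in var_dict.items():
    (data_point,) = record.values()  # works for "Value" and "Dual" alike
    print(key, data_point)
```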
switchwrapper/switch_extract.py: 108 changes (96 additions & 12 deletions)
@@ -43,7 +43,8 @@ def __init__(
         self.timestamps_to_timepoints = load_timestamps_to_timepoints(
             timestamps_to_timepoints_file
         )
-        self._timestamp_to_investment_year(timepoints_file)
+        self._add_timepoint_weight()
+        self._add_investment_year(timepoints_file)
         self._get_parsed_data(results_file)
         self.plant_id_mapping, _ = recover_plant_indices(
             self.parsed_data["DispatchGen"].columns.map(lambda x: x[1])
@@ -53,23 +54,29 @@
             self.ac_branch_id_mapping,
             self.dc_branch_id_mapping,
         ) = branch_indices_to_bus_tuple(grid)
+        self._calculate_abs_transmission_duals()
         self.grids = construct_grids_from_switch_results(grid, self.results)

         self.loads = pd.read_csv(loads_file)
         self.variable_capacity_factors = pd.read_csv(variable_capacity_factors_file)
         self._reconstruct_input_profiles()

-    def _timestamp_to_investment_year(self, timepoints_file):
+    def _add_timepoint_weight(self):
+        """Add weights to timestamps_to_timepoints data frame based on timepoints."""
+        self.timestamps_to_timepoints["weight"] = self.timestamps_to_timepoints[
+            "timepoint"
+        ].map(self.timestamps_to_timepoints.squeeze().value_counts())
+
+    def _add_investment_year(self, timepoints_file):
         """Get investment year for each timestamp via timepoints.
         :param str timepoints_file: file path of timepoints.csv.
         """
         timepoints = pd.read_csv(timepoints_file)
         timepoints.set_index("timepoint_id", inplace=True)
-        self.timestamp_to_investment_year = pd.Series(
-            self.timestamps_to_timepoints["timepoint"].map(timepoints["ts_period"]),
-            index=self.timestamps_to_timepoints.index,
-        )
+        self.timestamps_to_timepoints[
+            "investment_year"
+        ] = self.timestamps_to_timepoints["timepoint"].map(timepoints["ts_period"])
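A toy walk-through of the two helpers above, with an invented mapping of three timestamps onto two timepoints. Note that `squeeze()` collapses the frame to a Series only because `_add_timepoint_weight` runs while `timepoint` is still the sole column, which is why it is called before anything else adds columns:

```python
import pandas as pd

timestamps_to_timepoints = pd.DataFrame(
    {"timepoint": [1, 1, 2]},
    index=pd.Index(
        ["2030-01-01 00:00", "2030-01-01 01:00", "2030-01-01 02:00"],
        name="timestamp",
    ),
)

# _add_timepoint_weight: a timepoint's weight is how many timestamps map to it.
timestamps_to_timepoints["weight"] = timestamps_to_timepoints["timepoint"].map(
    timestamps_to_timepoints.squeeze().value_counts()
)

# _add_investment_year: each timestamp's timepoint maps to its ts_period.
timepoints = pd.DataFrame(
    {"ts_period": [2030, 2030]}, index=pd.Index([1, 2], name="timepoint_id")
)
timestamps_to_timepoints["investment_year"] = timestamps_to_timepoints[
    "timepoint"
].map(timepoints["ts_period"])

list(timestamps_to_timepoints["weight"])           # [2, 2, 1]
list(timestamps_to_timepoints["investment_year"])  # [2030, 2030, 2030]
```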
     def _get_parsed_data(self, results_file):
         """Parse Switch results to get raw time series of pg and pf.
@@ -78,11 +85,22 @@
         """
         with open(results_file, "rb") as f:
             self.results = pickle.load(f)
-        data = self.results.solution._list[0].Variable
-        variables_to_parse = ["DispatchGen", "DispatchTx"]
-        self.parsed_data = parse_timepoints(
-            data, variables_to_parse, self.timestamps_to_timepoints, "dispatch"
-        )
+        data = ["Variable", "Constraint"]
+        variables_to_parse = [
+            ["DispatchGen", "DispatchTx"],
+            ["Zone_Energy_Balance", "Maximum_DispatchTx"],
+        ]
+        value_names = ["dispatch", "dual"]
+        self.parsed_data = dict()
+        for d, var, vn in zip(data, variables_to_parse, value_names):
+            self.parsed_data.update(
+                parse_timepoints(
+                    self.results.solution._list[0][d],
+                    var,
+                    self.timestamps_to_timepoints,
+                    value_name=vn,
+                )
+            )

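The loop above merges primal and dual results into one flat `parsed_data` dict. A hedged sketch of the shape this appears to produce, with invented ids and values; the (name, index) tuple columns match the `x[1]` lookups used elsewhere in this class:

```python
import pandas as pd

idx = pd.Index(["2030-01-01 00:00", "2030-01-01 01:00"], name="timestamp")
parsed_data = {
    # primal dispatch, parsed with value_name="dispatch"
    "DispatchGen": pd.DataFrame({("DispatchGen", "10"): [4.2, 3.9]}, index=idx),
    # constraint duals, parsed with value_name="dual"
    "Zone_Energy_Balance": pd.DataFrame(
        {("Zone_Energy_Balance", "201"): [51.4, 51.4]}, index=idx
    ),
}
# Downstream methods look up either kind by name, e.g. bus ids for LMP:
bus_ids = parsed_data["Zone_Energy_Balance"].columns.map(lambda x: int(x[1]))
```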
     def get_pg(self):
         """Get time series power generation for each plant.
@@ -98,7 +116,8 @@
         pg = dict()
         for year, grid in self.grids.items():
             pg[year] = all_pg.loc[
-                self.timestamp_to_investment_year == year, grid.plant.index
+                self.timestamps_to_timepoints["investment_year"] == year,
+                grid.plant.index,
             ]
             pg[year].index = pd.Index(pg[year].index.map(pd.Timestamp), name="UTC")
         return pg
@@ -160,6 +179,68 @@ def get_dcline_pf(self):
         )
         return dcline_pf

+    def get_lmp(self):
+        """Get time series lmp for each bus in every investment year.
+        :return: (*dict*) -- keys are investment years, values are data frames indexed
+            by timestamps with bus_id as columns.
+        """
+        all_lmp = self.parsed_data["Zone_Energy_Balance"].copy()
+        all_lmp.columns = all_lmp.columns.map(lambda x: int(x[1]))
+        lmp = dict()
+        for year, grid in self.grids.items():
+            lmp[year] = all_lmp.loc[
+                self.timestamps_to_timepoints["investment_year"] == year, grid.bus.index
+            ].divide(self.timestamps_to_timepoints["weight"], axis="index")
+            lmp[year].index = pd.Index(lmp[year].index.map(pd.Timestamp), name="UTC")
+        return lmp

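On the division by `weight` in `get_lmp`: each timepoint stands in for several identical hours, so, on the usual reading of duals of a weighted problem, the `Zone_Energy_Balance` dual prices the whole block of hours a timepoint represents; dividing by the weight recovers a per-hour price. A toy illustration with invented numbers:

```python
import pandas as pd

idx = ["2030-01-01 00:00", "2030-01-01 01:00"]  # two hours, one shared timepoint
weight = pd.Series([2, 2], index=idx)           # that timepoint covers 2 hours
raw_dual = pd.DataFrame({201: [51.4, 51.4]}, index=idx)  # bus 201, block price

lmp = raw_dual.divide(weight, axis="index")  # 25.7 per hour for bus 201
```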
+    def _calculate_abs_transmission_duals(self):
+        """Calculate absolute values of transmission duals between every bus tuple."""
+        self.abs_cong = (
+            self.parsed_data["Maximum_DispatchTx"]
+            .abs()
+            .divide(self.timestamps_to_timepoints["weight"], axis="index")
+        )
+        self.abs_cong.columns = self.abs_cong.columns.map(
+            lambda x: tuple(map(int, x[1].split(",")))
+        )
+        self.abs_cong.index = pd.Index(
+            self.abs_cong.index.map(pd.Timestamp), name="UTC"
+        )

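`Maximum_DispatchTx` constraints are indexed by directed bus pairs, which arrive serialized as a single "from,to" string in the second element of each column tuple; the mapping above turns them into integer pairs so they can be matched against `ac_branch_id_mapping`. A small sketch with invented bus ids:

```python
import pandas as pd

cols = pd.Index(
    [("Maximum_DispatchTx", "101,102"), ("Maximum_DispatchTx", "102,101")]
)
bus_pairs = cols.map(lambda x: tuple(map(int, x[1].split(","))))
list(bus_pairs)  # [(101, 102), (102, 101)]
```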
+    def get_congu(self):
+        """Get time series congu, i.e. congestion at upper power flow limit, for each
+        ac branch in every investment year.
+        :return: (*dict*) -- keys are investment years, values are data frames indexed
+            by timestamps with branch_id as columns.
+        """
+        congu = dict()
+        for year, grid in self.grids.items():
+            congu[year] = self.abs_cong[
+                grid.branch.index.map(self.ac_branch_id_mapping)
+            ]
+            congu[year].columns = grid.branch.index
+        return congu

+    def get_congl(self):
+        """Get time series congl, i.e. congestion at lower power flow limit, for each
+        ac branch in every investment year.
+        :return: (*dict*) -- keys are investment years, values are data frames indexed
+            by timestamps with branch_id as columns.
+        """
+        congl = dict()
+        abs_cong_mirror = self.abs_cong.copy()
+        abs_cong_mirror.columns = abs_cong_mirror.columns.map(lambda x: (x[1], x[0]))
+        for year, grid in self.grids.items():
+            congl[year] = abs_cong_mirror[
+                grid.branch.index.map(self.ac_branch_id_mapping)
+            ]
+            congl[year].columns = grid.branch.index
+        return congl

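The mirroring in `get_congl` relies on `abs_cong` columns being directed (from_bus, to_bus) pairs: congestion at a branch's lower flow limit is the upper-limit dual of the reverse direction, so relabeling every column with its reversed pair lets the same branch-to-pair lookup used in `get_congu` fetch the opposite direction. A sketch with invented values:

```python
import pandas as pd

abs_cong = pd.DataFrame({(101, 102): [0.0, 1.3], (102, 101): [2.1, 0.0]})
abs_cong_mirror = abs_cong.copy()
abs_cong_mirror.columns = abs_cong_mirror.columns.map(lambda x: (x[1], x[0]))

abs_cong_mirror[(101, 102)].tolist()  # [2.1, 0.0], originally under (102, 101)
```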
     def _reconstruct_input_profiles(self):
         """Given the temporally-reduced profiles that are given to Switch and the
         reduction mapping, reconstruct full-dimension profiles for the Grid that is
@@ -293,6 +374,9 @@ def get_output_scenarios(switch_files_root):
             hydro=se.get_hydro()[year],
             solar=se.get_solar()[year],
             wind=se.get_wind()[year],
+            lmp=se.get_lmp()[year],
+            congu=se.get_congu()[year],
+            congl=se.get_congl()[year],
         )
         mock_scenario.state.grid = se.grids[year]
         scenarios[year] = mock_scenario
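Hypothetical usage of `get_output_scenarios`; the path and year are invented:

```python
scenarios = get_output_scenarios("/path/to/switch_files")  # {year: MockScenario}
mock_2030 = scenarios[2030]
grid_2030 = mock_2030.state.grid  # per-year Grid rebuilt from Switch results
```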