diff --git a/powersimdata/input/abstract_grid.py b/powersimdata/input/abstract_grid.py index 5c6525507..6f00b7105 100644 --- a/powersimdata/input/abstract_grid.py +++ b/powersimdata/input/abstract_grid.py @@ -1,15 +1,25 @@ +import os + import pandas as pd from powersimdata.input import const +from powersimdata.input.helpers import ( + add_coord_to_grid_data_frames, + add_zone_to_grid_data_frames, + csv_to_data_frame, +) +from powersimdata.network.csv_reader import CSVReader class AbstractGrid: - """Grid Builder.""" + """Grid Builder. Child classes must assign self.top_dirname and + self.umbrella_interconnect before self.__init__ is called, or re-define the __init__ + and/or methods called within the __init__ to avoid an AttributeError. + """ def __init__(self): """Constructor""" self.data_loc = None - self.interconnect = None self.zone2id = {} self.id2zone = {} self.sub = pd.DataFrame() @@ -20,6 +30,63 @@ def __init__(self): self.bus = pd.DataFrame() self.branch = pd.DataFrame() self.storage = storage_template() + self._set_data_loc() + self._build_network() + + def _set_data_loc(self): + """Sets data location. + + :raises IOError: if directory does not exist. + """ + data_loc = os.path.join(self.top_dirname, "data") + if os.path.isdir(data_loc) is False: + raise IOError("%s directory not found" % data_loc) + else: + self.data_loc = data_loc + + def _build_network(self): + """Build network.""" + reader = CSVReader(self.data_loc) + self.bus = reader.bus + self.plant = reader.plant + self.branch = reader.branch + self.dcline = reader.dcline + self.gencost["after"] = self.gencost["before"] = reader.gencost + + self._add_information_to_model() + + if self.umbrella_interconnect not in self.interconnect: + self._drop_interconnect() + + def _add_information_to_model(self): + self.sub = csv_to_data_frame(self.data_loc, "sub.csv") + self.bus2sub = csv_to_data_frame(self.data_loc, "bus2sub.csv") + self.id2zone = csv_to_data_frame(self.data_loc, "zone.csv").zone_name.to_dict() + self.zone2id = {v: k for k, v in self.id2zone.items()} + + add_zone_to_grid_data_frames(self) + add_coord_to_grid_data_frames(self) + + def _drop_interconnect(self): + """Trim data frames to only keep information pertaining to the user + defined interconnect(s). 
+ + """ + for key, value in self.__dict__.items(): + if key in ["sub", "bus2sub", "bus", "plant", "branch"]: + value.query("interconnect == @self.interconnect", inplace=True) + elif key == "gencost": + value["before"].query( + "interconnect == @self.interconnect", inplace=True + ) + elif key == "dcline": + value.query( + "from_interconnect == @self.interconnect &" + "to_interconnect == @self.interconnect", + inplace=True, + ) + self.id2zone = {k: self.id2zone[k] for k in self.bus.zone_id.unique()} + self.zone2id = {value: key for key, value in self.id2zone.items()} def storage_template(): diff --git a/powersimdata/input/grid.py b/powersimdata/input/grid.py index 2f8c9d114..f4c487482 100644 --- a/powersimdata/input/grid.py +++ b/powersimdata/input/grid.py @@ -3,6 +3,7 @@ from powersimdata.data_access.context import Context from powersimdata.data_access.scenario_list import ScenarioListManager from powersimdata.input.scenario_grid import FromREISE, FromREISEjl +from powersimdata.network.hifld.model import HIFLD from powersimdata.network.model import ModelImmutables from powersimdata.network.usa_tamu.constants import storage as tamu_storage from powersimdata.network.usa_tamu.model import TAMU @@ -13,7 +14,7 @@ class Grid: - SUPPORTED_MODELS = {"usa_tamu"} + SUPPORTED_MODELS = {"hifld", "usa_tamu"} SUPPORTED_ENGINES = {"REISE", "REISE.jl"} """Grid @@ -49,11 +50,15 @@ def __init__(self, interconnect, source="usa_tamu", engine="REISE"): data = cached elif source == "usa_tamu": data = TAMU(interconnect) + elif source == "hifld": + data = HIFLD(interconnect) elif os.path.splitext(source)[1] == ".mat": if engine == "REISE": data = FromREISE(source) elif engine == "REISE.jl": data = FromREISEjl(source) + else: + raise ValueError(f"Unknown source: {source}") self.data_loc = data.data_loc self.interconnect = data.interconnect diff --git a/powersimdata/input/scenario_grid.py b/powersimdata/input/scenario_grid.py index 449638d9e..b1e0ed488 100644 --- a/powersimdata/input/scenario_grid.py +++ b/powersimdata/input/scenario_grid.py @@ -21,21 +21,19 @@ def __init__(self, filename): :param str filename: path to file. """ + self.filename = filename super().__init__() - self._set_data_loc(filename) - self._build_network() - - def _set_data_loc(self, filename): + def _set_data_loc(self): """Sets data location. :param str filename: path to file :raises FileNotFoundError: if file does not exist. 
""" - if os.path.isfile(filename) is False: - raise FileNotFoundError("%s file not found" % filename) + if os.path.isfile(self.filename) is False: + raise FileNotFoundError("%s file not found" % self.filename) else: - self.data_loc = filename + self.data_loc = self.filename def _read_network(self): data = loadmat(self.data_loc, squeeze_me=True, struct_as_record=False) diff --git a/powersimdata/network/hifld/__init__.py b/powersimdata/network/hifld/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/powersimdata/network/hifld/constants/__init__.py b/powersimdata/network/hifld/constants/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/powersimdata/network/hifld/constants/plants.py b/powersimdata/network/hifld/constants/plants.py new file mode 100644 index 000000000..ef950613f --- /dev/null +++ b/powersimdata/network/hifld/constants/plants.py @@ -0,0 +1,105 @@ +_exports = [ + "all_resources", + "carbon_per_mmbtu", + "carbon_per_mwh", + "carbon_resources", + "clean_resources", + "label2type", + "nox_per_mwh", + "renewable_resources", + "so2_per_mwh", + "type2color", + "type2hatchcolor", + "type2label", +] + +type2color = { + "wind": "xkcd:green", + "solar": "xkcd:amber", + "hydro": "xkcd:light blue", + "ng": "xkcd:orchid", + "nuclear": "xkcd:silver", + "coal": "xkcd:light brown", + "geothermal": "xkcd:hot pink", + "dfo": "xkcd:royal blue", + "biomass": "xkcd:dark green", + "other": "xkcd:melon", + "storage": "xkcd:orange", + "wind_offshore": "xkcd:teal", + "solar_curtailment": "xkcd:amber", + "wind_curtailment": "xkcd:green", + "wind_offshore_curtailment": "xkcd:teal", +} + +type2label = { + "nuclear": "Nuclear", + "geothermal": "Geo-thermal", + "coal": "Coal", + "dfo": "DFO", + "hydro": "Hydro", + "ng": "Natural Gas", + "solar": "Solar", + "wind": "Wind", + "wind_offshore": "Wind Offshore", + "biomass": "Biomass", + "other": "Other", + "storage": "Storage", + "solar_curtailment": "Solar Curtailment", + "wind_curtailment": "Wind Curtailment", + "wind_offshore_curtailment": "Offshore Wind Curtailment", +} + +type2hatchcolor = { + "solar_curtailment": "xkcd:grey", + "wind_curtailment": "xkcd:grey", + "wind_offshore_curtailment": "xkcd:grey", +} + +label2type = {value: key for key, value in type2label.items()} + +renewable_resources = {"solar", "wind", "wind_offshore"} +carbon_resources = {"coal", "ng", "dfo"} +clean_resources = renewable_resources | {"geothermal", "hydro", "nuclear"} +all_resources = carbon_resources | {"other"} | clean_resources + + +# MWh to kilograms of CO2 +# Source: IPCC Special Report on Renewable Energy Sources and Climate Change +# Mitigation (2011), Annex II: Methodology, Table A.II.4, 50th percentile +# http://www.ipcc-wg3.de/report/IPCC_SRREN_Annex_II.pdf +carbon_per_mwh = { + "coal": 1001, + "dfo": 840, + "ng": 469, +} + +# MMBTu of fuel per hour to kilograms of CO2 per hour +# Source: https://www.epa.gov/energy/greenhouse-gases-equivalencies-calculator-calculations-and-references +# = (Heat rate MMBTu/h) * (kg C/mmbtu) * (mass ratio CO2/C) +carbon_per_mmbtu = { + "coal": 26.05, + "dfo": 20.31, + "ng": 14.46, +} + +# MWh to kilograms of NOx +# Source: EPA eGrid 2018, tab 'US18' (U.S. summary), columns AN to AP +# https://www.epa.gov/egrid/egrid-questions-and-answers +nox_per_mwh = { + "coal": 0.658, + "dfo": 1.537, + "ng": 0.179, +} + +# MWh to kilograms of SO2 +# Source: EPA eGrid 2018, tab 'US18' (U.S. 
summary), columns AV to AX +# https://www.epa.gov/egrid/egrid-questions-and-answers +so2_per_mwh = { + "coal": 0.965, + "dfo": 2.189, + "ng": 0.010, +} + + +def __dir__(): + return sorted(_exports) diff --git a/powersimdata/network/hifld/constants/storage.py b/powersimdata/network/hifld/constants/storage.py new file mode 100644 index 000000000..d3b667e63 --- /dev/null +++ b/powersimdata/network/hifld/constants/storage.py @@ -0,0 +1,17 @@ +_exports = ["defaults"] + +defaults = { + "duration": 4, + "min_stor": 0.05, + "max_stor": 0.95, + "InEff": 0.9, + "OutEff": 0.9, + "energy_value": 20, + "LossFactor": 0, + "terminal_min": 0, + "terminal_max": 1, +} + + +def __dir__(): + return sorted(_exports) diff --git a/powersimdata/network/hifld/constants/zones.py b/powersimdata/network/hifld/constants/zones.py new file mode 100644 index 000000000..cde476e0d --- /dev/null +++ b/powersimdata/network/hifld/constants/zones.py @@ -0,0 +1,246 @@ +import os + +import pandas as pd + +_exports = [ + "abv", + "abv2interconnect", + "abv2loadzone", + "abv2state", + "id2abv", + "id2loadzone", + "id2timezone", + "interconnect", + "interconnect2abv", + "interconnect2id", + "interconnect2loadzone", + "interconnect2timezone", + "interconnect_combinations", + "loadzone", + "loadzone2id", + "loadzone2interconnect", + "loadzone2state", + "mappings", + "state", + "state2abv", + "state2loadzone", + "timezone2id", +] + +mappings = {"loadzone", "state", "state_abbr", "interconnect"} + +# Define combinations of interconnects +interconnect_combinations = { + "USA": {"Eastern", "Western", "ERCOT"}, +} + + +# Map state abbreviations to state name +abv2state = { + "AK": "Alaska", + "AL": "Alabama", + "AR": "Arkansas", + "AZ": "Arizona", + "CA": "California", + "CO": "Colorado", + "CT": "Connecticut", + "DE": "Delaware", + "FL": "Florida", + "GA": "Georgia", + "HI": "Hawaii", + "IA": "Iowa", + "ID": "Idaho", + "IL": "Illinois", + "IN": "Indiana", + "KS": "Kansas", + "KY": "Kentucky", + "LA": "Louisiana", + "MA": "Massachusetts", + "MD": "Maryland", + "ME": "Maine", + "MI": "Michigan", + "MN": "Minnesota", + "MO": "Missouri", + "MS": "Mississippi", + "MT": "Montana", + "NC": "North Carolina", + "ND": "North Dakota", + "NE": "Nebraska", + "NH": "New Hampshire", + "NJ": "New Jersey", + "NM": "New Mexico", + "NV": "Nevada", + "NY": "New York", + "OH": "Ohio", + "OK": "Oklahoma", + "OR": "Oregon", + "PA": "Pennsylvania", + "RI": "Rhode Island", + "SC": "South Carolina", + "SD": "South Dakota", + "TN": "Tennessee", + "TX": "Texas", + "UT": "Utah", + "VA": "Virginia", + "VT": "Vermont", + "WA": "Washington", + "WI": "Wisconsin", + "WV": "West Virginia", + "WY": "Wyoming", +} + + +# Map state name to state abbreviations +state2abv = {value: key for key, value in abv2state.items()} + + +# Map zones to higher-level aggregations using the information in zone.csv +zone_csv_path = os.path.join(os.path.dirname(__file__), "..", "data", "zone.csv") +zone_df = pd.read_csv(zone_csv_path, index_col=0) + +# load zone id to load zone name +id2loadzone = zone_df["zone_name"].to_dict() +# load zone name to load zone id +loadzone2id = {v: k for k, v in id2loadzone.items()} +# Map state name to load zone name +state2loadzone = { + k: set(v) for k, v in zone_df.groupby("state").zone_name.unique().to_dict().items() +} +# Map interconnect name to load zone name +interconnect2loadzone = { + k: set(v) + for k, v in zone_df.groupby("interconnect").zone_name.unique().to_dict().items() +} +interconnect2loadzone["USA"] = ( + interconnect2loadzone["Eastern"] + | 
interconnect2loadzone["Western"] + | interconnect2loadzone["ERCOT"] +) +# Map interconnect to load zone id +interconnect2id = { + k: set(zone_df.isin(v).query("zone_name == True").index) + for k, v in interconnect2loadzone.items() +} + +# Map load zone id to state abbreviations +id2abv = {k: state2abv[v] for k, v in zone_df.state.to_dict().items()} + + +# Map state abbreviations to load zone name +abv2loadzone = { + state2abv[state]: loadzone for state, loadzone in state2loadzone.items() +} + + +# Map load zone name to state name +loadzone2state = {} +for state, zone_set in state2loadzone.items(): + loadzone2state.update({zone: state for zone in zone_set}) + + +# Map load zone name to interconnect name +loadzone2interconnect = { + zone: interconnect + for interconnect, zone_set in interconnect2loadzone.items() + for zone in zone_set + if interconnect not in interconnect_combinations +} + + +# Map interconnect name to state abbreviations +# Note: states which span interconnects are assigned to the one they're 'most' in. +interconnect2abv = { + "Eastern": { + "ME", + "NH", + "VT", + "MA", + "RI", + "CT", + "NY", + "NJ", + "PA", + "DE", + "MD", + "VA", + "NC", + "SC", + "GA", + "FL", + "AL", + "MS", + "TN", + "KY", + "WV", + "OH", + "MI", + "IN", + "IL", + "WI", + "MN", + "IA", + "MO", + "AR", + "LA", + "OK", + "KS", + "NE", + "SD", + "ND", + }, + "ERCOT": {"TX"}, + "Western": {"WA", "OR", "CA", "NV", "AZ", "UT", "NM", "CO", "WY", "ID", "MT"}, +} +interconnect2abv["USA"] = ( + interconnect2abv["Eastern"] + | interconnect2abv["Western"] + | interconnect2abv["ERCOT"] +) + + +# Map state abbreviations to interconnect name +abv2interconnect = {} +for k, v in interconnect2abv.items(): + if k in interconnect_combinations: + continue + for s in v: + abv2interconnect[s] = k + + +# List of interconnect name +interconnect = set(interconnect2abv.keys()) + + +# List of state name +state = set(state2abv.keys()) + + +# List of state abbreviations +abv = set(abv2state.keys()) + + +# List of load zone name +loadzone = set(loadzone2interconnect.keys()) + +# Map interconnect name to time zone +interconnect2timezone = { + "USA": "ETC/GMT+6", + "Eastern": "ETC/GMT+5", + "ERCOT": "ETC/GMT+6", + "Western": "ETC/GMT+8", + "Eastern_ERCOT": "ETC/GMT+5", + "Eastern_Western": "ETC/GMT+6", + "ERCOT_Western": "ETC/GMT+7", +} + + +# Map load zone IDs to time zones +# Note: load zones in > 1 time zone are put in the one where most load centers reside +id2timezone = zone_df["time_zone"].to_dict() + +# Map time zones to load zone IDs +timezone2id = {k: set(v) for k, v in zone_df.groupby("time_zone").groups.items()} + + +def __dir__(): + return sorted(_exports) diff --git a/powersimdata/network/hifld/model.py b/powersimdata/network/hifld/model.py new file mode 100644 index 000000000..225a25ac0 --- /dev/null +++ b/powersimdata/network/hifld/model.py @@ -0,0 +1,50 @@ +import os + +from powersimdata.input.abstract_grid import AbstractGrid +from powersimdata.network.hifld.constants.storage import defaults + + +class HIFLD(AbstractGrid): + def __init__(self, interconnect): + """Constructor.""" + self.top_dirname = os.path.dirname(__file__) + self.interconnect = check_and_format_interconnect(interconnect) + self.umbrella_interconnect = "USA" + super().__init__() + self.storage.update(defaults) + + +def check_and_format_interconnect(interconnect): + """Checks interconnect. + + :param str/iterable interconnect: interconnect name(s). + :return: (*list*) -- interconnect(s) + :raises TypeError: if parameter has wrong type. 
+ :raises ValueError: if interconnect not found or combination of interconnect is not + appropriate. + """ + if isinstance(interconnect, str): + interconnect = [interconnect] + try: + interconnect = sorted(set(interconnect)) + except: # noqa + raise TypeError("interconnect must be either str or an iterable of str") + + possible = ["Eastern", "Western", "ERCOT", "USA"] + if any(i for i in interconnect if i not in possible): + raise ValueError("Wrong interconnect. Choose from %s" % " | ".join(possible)) + n = len(interconnect) + if "USA" in interconnect and n > 1: + raise ValueError("'USA' cannot be paired") + if n == 3: + raise ValueError("Use 'USA' instead") + + return interconnect + + +def interconnect_to_name(interconnect): + """Return name of interconnect or collection of interconnects. + + :param iterable interconnect: interconnect name(s). + """ + return "_".join(sorted(check_and_format_interconnect(interconnect))) diff --git a/powersimdata/network/model.py b/powersimdata/network/model.py index 4be574ce1..19a1d4940 100644 --- a/powersimdata/network/model.py +++ b/powersimdata/network/model.py @@ -29,7 +29,7 @@ def _check_model(model): :param str model: grid model name :raises ValueError: if grid model does not exist. """ - possible = {"usa_tamu"} + possible = {"usa_tamu", "hifld"} if model not in possible: raise ValueError("model must be one of %s" % " | ".join(possible)) diff --git a/powersimdata/network/usa_tamu/model.py b/powersimdata/network/usa_tamu/model.py index 903578682..5d4509f88 100644 --- a/powersimdata/network/usa_tamu/model.py +++ b/powersimdata/network/usa_tamu/model.py @@ -1,12 +1,6 @@ import os from powersimdata.input.abstract_grid import AbstractGrid -from powersimdata.input.helpers import ( - add_coord_to_grid_data_frames, - add_zone_to_grid_data_frames, - csv_to_data_frame, -) -from powersimdata.network.csv_reader import CSVReader from powersimdata.network.usa_tamu.constants.storage import defaults @@ -18,61 +12,12 @@ class TAMU(AbstractGrid): def __init__(self, interconnect): """Constructor.""" - super().__init__() - self._set_data_loc() - + self.top_dirname = os.path.dirname(__file__) self.interconnect = check_and_format_interconnect(interconnect) - self._build_network() - - def _set_data_loc(self): - """Sets data location. - - :raises IOError: if directory does not exist. - """ - top_dirname = os.path.dirname(__file__) - data_loc = os.path.join(top_dirname, "data") - if os.path.isdir(data_loc) is False: - raise IOError("%s directory not found" % data_loc) - else: - self.data_loc = data_loc - - def _build_network(self): - """Build network.""" - reader = CSVReader(self.data_loc) - self.bus = reader.bus - self.plant = reader.plant - self.branch = reader.branch - self.dcline = reader.dcline - self.gencost["after"] = self.gencost["before"] = reader.gencost - + self.umbrella_interconnect = "USA" + super().__init__() self.storage.update(defaults) - add_information_to_model(self) - - if "USA" not in self.interconnect: - self._drop_interconnect() - - def _drop_interconnect(self): - """Trim data frames to only keep information pertaining to the user - defined interconnect(s). 
- - """ - for key, value in self.__dict__.items(): - if key in ["sub", "bus2sub", "bus", "plant", "branch"]: - value.query("interconnect == @self.interconnect", inplace=True) - elif key == "gencost": - value["before"].query( - "interconnect == @self.interconnect", inplace=True - ) - elif key == "dcline": - value.query( - "from_interconnect == @self.interconnect &" - "to_interconnect == @self.interconnect", - inplace=True, - ) - self.id2zone = {k: self.id2zone[k] for k in self.bus.zone_id.unique()} - self.zone2id = {value: key for key, value in self.id2zone.items()} - def check_and_format_interconnect(interconnect): """Checks interconnect. @@ -108,17 +53,3 @@ def interconnect_to_name(interconnect): :param list interconnect: interconnect name(s). """ return "_".join(sorted(check_and_format_interconnect(interconnect))) - - -def add_information_to_model(model): - """Adds information to TAMU model. This is done inplace. - - :param powersimdata.input.TAMU model: TAMU instance. - """ - model.sub = csv_to_data_frame(model.data_loc, "sub.csv") - model.bus2sub = csv_to_data_frame(model.data_loc, "bus2sub.csv") - model.id2zone = csv_to_data_frame(model.data_loc, "zone.csv").zone_name.to_dict() - model.zone2id = {v: k for k, v in model.id2zone.items()} - - add_zone_to_grid_data_frames(model) - add_coord_to_grid_data_frames(model)
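
The refactored AbstractGrid now drives the whole build from its constructor: _set_data_loc() resolves <top_dirname>/data and _build_network() loads the CSV tables, adds zone and coordinate information, then trims everything down to the requested interconnect(s). A minimal sketch of what a new child class would have to provide, following the pattern TAMU and HIFLD use in this patch (the MyModel class and its data directory are hypothetical, not part of the patch):

    import os

    from powersimdata.input.abstract_grid import AbstractGrid


    class MyModel(AbstractGrid):
        """Hypothetical grid model wired into the refactored AbstractGrid."""

        def __init__(self, interconnect):
            # These attributes must exist before AbstractGrid.__init__ runs;
            # otherwise _set_data_loc/_build_network raise an AttributeError.
            self.top_dirname = os.path.dirname(__file__)  # expects a data/ folder next to this file
            self.interconnect = ["Western"]  # normally validated, e.g. by a check_and_format_interconnect()
            self.umbrella_interconnect = "USA"
            super().__init__()  # reads the grid CSVs (bus, plant, branch, dcline, gencost, sub, bus2sub, zone)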
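
AbstractGrid._drop_interconnect leans on a pandas detail worth spelling out: inside DataFrame.query(), comparing a column to a list-valued variable with == behaves like isin(), so one expression filters each table to the selected interconnect(s). A standalone illustration with toy data (the three-row frame is made up):

    import pandas as pd

    branch = pd.DataFrame(
        {"branch_id": [1, 2, 3], "interconnect": ["Eastern", "Western", "ERCOT"]}
    ).set_index("branch_id")

    interconnect = ["Eastern", "Western"]  # plays the role of self.interconnect

    # == against a list acts as membership; inplace=True mirrors _drop_interconnect.
    branch.query("interconnect == @interconnect", inplace=True)
    print(branch.index.tolist())  # [1, 2]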
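
With "hifld" added to Grid.SUPPORTED_MODELS and to the models accepted by ModelImmutables, selecting the new network is a matter of passing source="hifld". A usage sketch, assuming the powersimdata/network/hifld/data/ CSVs are in place (this patch references that directory, e.g. from constants/zones.py, but does not add it):

    from powersimdata.input.grid import Grid

    # Build the HIFLD version of the Western interconnection.
    western = Grid("Western", source="hifld")
    print(western.interconnect)  # ['Western']

    # Interconnections can be combined, as with usa_tamu.
    east_ercot = Grid(["Eastern", "ERCOT"], source="hifld")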
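
The emission factors in hifld/constants/plants.py are flat dictionaries (kg per MWh, except carbon_per_mmbtu which is kg per MMBtu of fuel), so turning generation totals into emissions is a straight multiplication. A small illustrative calculation; how the rest of the package consumes these tables is outside this patch:

    from powersimdata.network.hifld.constants.plants import carbon_per_mwh, nox_per_mwh

    generated_mwh = {"coal": 120.0, "ng": 300.0}  # toy generation totals

    co2_kg = sum(mwh * carbon_per_mwh[fuel] for fuel, mwh in generated_mwh.items())
    nox_kg = sum(mwh * nox_per_mwh[fuel] for fuel, mwh in generated_mwh.items())

    print(round(co2_kg))     # 120 * 1001 + 300 * 469 = 260820
    print(round(nox_kg, 1))  # 120 * 0.658 + 300 * 0.179 = 132.7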
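
Every derived mapping in hifld/constants/zones.py is reshaped from the same zone.csv table (zone id index with zone_name, state, interconnect and time_zone columns), mostly via groupby plus dict and set comprehensions. A toy reproduction of the state2loadzone and interconnect2loadzone derivation (the three rows are invented for illustration):

    import pandas as pd

    # Stand-in for data/zone.csv, indexed by zone id.
    zone_df = pd.DataFrame(
        {
            "zone_name": ["Washington", "Oregon", "El Paso"],
            "state": ["Washington", "Oregon", "Texas"],
            "interconnect": ["Western", "Western", "Western"],
        },
        index=[201, 202, 203],
    )

    state2loadzone = {
        k: set(v) for k, v in zone_df.groupby("state").zone_name.unique().to_dict().items()
    }
    interconnect2loadzone = {
        k: set(v)
        for k, v in zone_df.groupby("interconnect").zone_name.unique().to_dict().items()
    }

    print(state2loadzone["Texas"])                # {'El Paso'}
    print(len(interconnect2loadzone["Western"]))  # 3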
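
hifld.model.check_and_format_interconnect mirrors the usa_tamu validator, with "ERCOT" as the name of the Texas grid. Expected behaviour, based on the rules in this patch:

    from powersimdata.network.hifld.model import (
        check_and_format_interconnect,
        interconnect_to_name,
    )

    print(check_and_format_interconnect("Western"))             # ['Western']
    print(check_and_format_interconnect(["Eastern", "ERCOT"]))  # ['ERCOT', 'Eastern'] (sorted, de-duplicated)
    print(interconnect_to_name(["Western", "ERCOT"]))           # 'ERCOT_Western'

    # Invalid requests fail fast:
    # check_and_format_interconnect(["USA", "Western"])              -> ValueError ('USA' cannot be paired)
    # check_and_format_interconnect(["Eastern", "Western", "ERCOT"]) -> ValueError (use 'USA' instead)
    # check_and_format_interconnect(42)                              -> TypeError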