Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

updates to run with mac/linux #75

Open
wants to merge 9 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 17 additions & 17 deletions environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,22 +4,22 @@ channels:
- conda-forge
- defaults
dependencies:
- cloudpickle # BSD-3-Clause license
- glpk # GNU GPL license
- ipykernel # BSD-3-Clause license
- ipython # BSD License
- nbconvert # BSD-3-Clause license
- nbformat # BSD-3-Clause license
- nrel-pysam>=3.0.1 # BSD-3-Clause license
- cloudpickle==3.0.0 # BSD-3-Clause license
- glpk==5.0 # GNU GPL license
- ipykernel==6.26.0 # BSD-3-Clause license
- ipython==8.16.1 # BSD License
- nbconvert==7.9.2 # BSD-3-Clause license
- nbformat==5.9.2 # BSD-3-Clause license
- nrel-pysam==3.0.2 # BSD-3-Clause license
- nrel-pysam-stubs # BSD-3-Clause license
- numpy # BSD-3-Clause license
- openpyxl # MIT license
- pandas # BSD-3-Clause license
- pint # BSD license
- plotly # MIT license
- pyomo>=6.0.0,<=6.4.0 # BSD license
- python>=2.7.12 # Python Software Foundation License
- numpy==1.26.0 # BSD-3-Clause license
- openpyxl==3.1.2 # MIT license
- pandas==2.1.1 # BSD-3-Clause license
- pint==0.22 # BSD license
- plotly==5.17.0 # MIT license
- pyomo==6.4.0 # BSD license
- python==3.10.12 # Python Software Foundation License
- pytz # MIT license
- pyutilib>=6.0.0 # GNU AGPLv3 license
- requests # Apache 2.0 license
- xlrd # BSD license
- pyutilib==6.0.0 # GNU AGPLv3 license
- requests==2.31.0 # Apache 2.0 license
- xlrd==2.0.1 # BSD license
5 changes: 4 additions & 1 deletion match_model/generate_input_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,10 @@ def generate_inputs(model_workspace):
if solver == "cbc":
print("Copying CBC solver to model run directory...")
# copy the cbc solver to the model workspace
shutil.copy("../../cbc.exe", model_workspace)
if os.path.exists("../../cbc"):
shutil.copy("../../cbc", model_workspace)
elif os.path.exists("../../cbc.exe"):
shutil.copy("../../cbc.exe", model_workspace)

# create the scenario folders in the input and output directories
try:
Expand Down
16 changes: 16 additions & 0 deletions match_model/notebooks/nb_utilities.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import os, shutil


def refresh_folder(model_workspace, keep_files):
    """Delete everything in ``model_workspace`` except entries named in ``keep_files``.

    Args:
        model_workspace: path to the directory to clean out.
        keep_files: collection of entry names (files or folders) to preserve.

    Every entry whose name is not in ``keep_files`` is removed; directories
    are removed recursively. Removal is destructive and not undoable.
    """
    for entry in os.listdir(model_workspace):
        # skip anything the caller asked us to keep
        if entry in keep_files:
            continue
        # build the full path once instead of re-joining per branch
        full_path = os.path.join(model_workspace, entry)
        if os.path.isdir(full_path):
            # remove the folder and all of its contents
            shutil.rmtree(full_path)
        else:
            # plain file: remove it directly
            os.remove(full_path)
99 changes: 58 additions & 41 deletions match_model/notebooks/run_scenarios.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -21,7 +21,8 @@
"from pathlib import Path\n",
"import match_model.generate_input_files as generate_input_files\n",
"import shutil\n",
"import time\n"
"import time\n",
"from nb_utilities import refresh_folder"
]
},
{
Expand All @@ -34,7 +35,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
Expand Down Expand Up @@ -76,6 +77,10 @@
"# check if the directory exists\n",
"if os.path.exists(model_workspace / \"inputs\"):\n",
" # check that an inputs version file exists\n",
" # list of files and folders to keep\n",
" keep_files = ['set_1','set_2','set_3', 'set_4', 'cbc', 'cbc.exe', 'model_inputs.xlsx']\n",
" refresh_folder(model_workspace, keep_files=keep_files)\n",
"\n",
" if os.path.exists(model_workspace / \"inputs_version.txt\"):\n",
" # get the version number\n",
" with open(model_workspace / \"inputs_version.txt\", \"r\") as i:\n",
Expand Down Expand Up @@ -125,11 +130,48 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Option 1: Run as many parallel threads as possible\n",
"## Windows Operating Systems:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"os.system(\n",
" f'start cmd /k \"cd {model_run_location} & activate match_model & match solve-scenarios\"'\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Running with Mac / Linux:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Steps to run on mac:\n",
"1. Open a new Terminal, and navigate to the folder with the homework/model_inputs.xlsx.\n",
"2. Run `mamba activate match_model` \n",
"3. run `match solve-scenarios`\n",
"\n",
"If you would like to run as many scenarios in parallel as your machine can support, run the following cell. \n",
"If not you can open the summary_reports/ ...._.html file in a browser and investigate the results of your model run.\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Helpers for Rerunning Model\n",
"After you have run one scenario if you would like to rerun the scenario you will need to delete certain files.\n",
"\n",
"This will identify how many processor cores (n) exist on your machine, and automatically run (n-1) scenarios (or the total number of scenarios, whichever is less). This should leave at least one core of your machine open for other tasks, but we still recommend that you close all other windows/processes before starting this option."
"To regenerate the input dataset:"
]
},
{
Expand All @@ -138,52 +180,27 @@
"metadata": {},
"outputs": [],
"source": [
"num_processors = mp.cpu_count()\n",
"print(f\"This machine has {num_processors} CPU cores\")\n",
"# get the number of scenarios to run\n",
"num_scenarios = len(os.listdir(model_workspace / \"inputs\"))\n",
"print(f\"There are {num_scenarios} scenarios to run\")\n",
"# determine how many threads to open\n",
"parallel_threads = min(num_processors - 1, num_scenarios)\n",
"print(f\"Running {parallel_threads} scenarios in parallel...\")\n",
"i = 0\n",
"while i < parallel_threads:\n",
" os.system(\n",
" f'start cmd /k \"cd {model_run_location} & activate match_model & match solve-scenarios\"'\n",
" )\n",
" time.sleep(2)\n",
" i += 1"
"# list of files and folders to keep\n",
"keep_files = ['set_1', 'cbc', 'model_inputs.xlsx']\n",
"refresh_folder(model_workspace, keep_files=keep_files)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Option 2: Start scenarios one at a time\n",
"\n",
"If you would like to only open a single solver window, run the following cell. Each time you run the following cell will open a new parallel solver."
"To rerun the match_model solver.... ie to run \"match solve-scenarios\":"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"os.system(\n",
" f'start cmd /k \"cd {model_run_location} & activate match_model & match solve-scenarios\"'\n",
")"
"# list of files and folders to keep\n",
"keep_files = ['cbc' , 'inputs_version.txt' , 'options.txt', 'set_1', 'inputs', 'model_inputs.xlsx', 'outputs', 'scenarios.txt', 'summary_reports' ]\n",
"refresh_folder(model_workspace, keep_files=keep_files)"
]
}
],
Expand All @@ -203,7 +220,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
"version": "3.10.12"
},
"orig_nbformat": 2,
"vscode": {
Expand Down
7 changes: 4 additions & 3 deletions match_model/reporting/generate_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@
import shutil




def post_solve(instance, outdir, inputs_dir):
"""
Runs the summary report
"""
# get the name of the scenario
scenario = str(outdir).split("/")[-1]

# shutil.copy('../reporting/summary_report.ipynb', inputs_dir)

# run the notebook
try:
os.system(
Expand All @@ -25,9 +25,10 @@ def post_solve(instance, outdir, inputs_dir):
os.system(
f'jupyter nbconvert --ExecutePreprocessor.kernel_name="python3" --to notebook --execute --inplace {inputs_dir}/summary_report.ipynb'
)

# convert the notebook to html and save it to the output directory
os.system(
f"jupyter nbconvert --to html --no-input --no-prompt {inputs_dir}/summary_report.ipynb --output-dir {outdir}/../../summary_reports --output summary_report_{scenario}"
)
# delete the notebook from the inputs directory
os.remove(f"{inputs_dir}/summary_report.ipynb")
# os.remove(f"{inputs_dir}/summary_report.ipynb")
7 changes: 4 additions & 3 deletions match_model/reporting/report_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,11 +474,11 @@ def calculate_generator_utilization(dispatch):

# calculate total annual generation in each category
utilization = (
dispatch.copy().drop(columns="Nodal_Price").groupby("generation_project").sum()
dispatch.copy().drop(columns=["Nodal_Price"]).groupby("generation_project").sum()
)

# sum all rows
utilization["Total"] = utilization.sum(axis=1)
utilization["Total"] = utilization.drop(columns=["timestamp","gen_tech"]).sum(axis=1)

# drop rows with zero generation
utilization = utilization[utilization["Total"] > 0]
Expand Down Expand Up @@ -1448,7 +1448,7 @@ def build_month_hour_dispatch_plot(
"""

mh_dispatch = dispatch_by_tech.copy()
mh_dispatch = mh_dispatch.set_index("timestamp")
mh_dispatch = mh_dispatch.drop(columns=['Generation Type']).set_index("timestamp")

# groupby month and hour
mh_dispatch = mh_dispatch.groupby(
Expand Down Expand Up @@ -1770,6 +1770,7 @@ def build_open_position_plot(load_balance, storage_exists):
"""
# merge mismatch data
mismatch = load_balance.copy()
mismatch.drop(columns='load_zone', inplace=True)
mismatch["timestamp"] = pd.to_datetime(mismatch["timestamp"])

mismatch["Net generation"] = (
Expand Down
13 changes: 7 additions & 6 deletions match_model/reporting/summary_report.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
"source": [
"# Copyright (c) 2022 The MATCH Authors. All rights reserved.\n",
"# Licensed under the GNU AFFERO GENERAL PUBLIC LICENSE Version 3 (or later), which is in the LICENSE file.\n",
"\n",
"print('entered summary_report.py')\n",
"from pathlib import Path\n",
"import pandas as pd\n",
"import plotly.express as px\n",
Expand All @@ -33,14 +33,14 @@
"from match_model.reporting.report_functions import *\n",
"\n",
"#get the name of the current directory to specify the scenario name and identify the output directory\n",
"scenario_name = str(Path.cwd()).split('\\\\')[-1]\n",
"scenario_name = Path.cwd().name\n",
"if scenario_name == 'inputs':\n",
" data_dir = Path.cwd() / '../outputs/'\n",
" inputs_dir = Path.cwd() / '../inputs/'\n",
" scenario_output_dir = Path.cwd() / '../summary_reports/'\n",
" scenario_name = 'N/A'\n",
"else:\n",
" data_dir = Path.cwd()/ f'../../outputs/{scenario_name}/'\n",
" data_dir = Path.cwd() / f'../../outputs/{scenario_name}/'\n",
" inputs_dir = Path.cwd() / f'../../inputs/{scenario_name}/'\n",
" scenario_output_dir = Path.cwd() / '../../summary_reports/'\n",
"\n",
Expand Down Expand Up @@ -786,7 +786,7 @@
"lr_generation_impact = lrmer_pivot.mul(addl_dispatch, axis=0, level=1).sum(axis=1)\n",
"# calculate annual total for each scenario\n",
"lr_generation_impact = (\n",
" lr_generation_impact.reset_index().groupby(\"cambium_scenario\").sum()\n",
" lr_generation_impact.reset_index().drop(columns=['timestamp']).groupby(\"cambium_scenario\").sum()\n",
")\n",
"lr_generation_impact = lr_generation_impact.rename(\n",
" columns={0: f'Generation {emissions_unit.split(\"/\")[0]}'}\n",
Expand All @@ -795,7 +795,8 @@
"# multiply the storage and mer data\n",
"lr_storage_impact = lrmer_pivot.mul(addl_storage_dispatch, axis=0, level=1).sum(axis=1)\n",
"# calculate annual total for each scenario\n",
"lr_storage_impact = lr_storage_impact.reset_index().groupby(\"cambium_scenario\").sum()\n",
"\n",
"lr_storage_impact = lr_storage_impact.reset_index().drop(columns=['timestamp']).groupby(\"cambium_scenario\").sum()\n",
"lr_storage_impact = lr_storage_impact.rename(\n",
" columns={0: f'Storage {emissions_unit.split(\"/\")[0]}'}\n",
")\n",
Expand Down Expand Up @@ -1068,7 +1069,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8 | packaged by conda-forge | (main, Nov 22 2022, 08:16:33) [MSC v.1929 64 bit (AMD64)]"
"version": "3.10.12"
},
"vscode": {
"interpreter": {
Expand Down
21 changes: 15 additions & 6 deletions match_model/solve.py
Original file line number Diff line number Diff line change
Expand Up @@ -841,9 +841,18 @@ def solve(model):
# with its own solver object (e.g., with runph or a parallel solver server).
# In those cases, we don't want to go through the expense of creating an
# unused solver object, or get errors if the solver options are invalid.
model.solver = SolverFactory(
model.options.solver, solver_io=model.options.solver_io
)

# Uncomment below if you want to use the old MATCH Model behaviour that works on Windows
# model.solver = SolverFactory(
# model.options.solver, solver_io=model.options.solver_io
# )

# Use for Mac.
model.solver = SolverFactory(model.options.solver)
solvername = 'cbc'
solverpath_exe = os.getcwd() + '/cbc'
model.solver=SolverFactory(solvername,executable=solverpath_exe)


# patch for Pyomo < 4.2
# note: Pyomo added an options_string argument to solver.solve() in Pyomo 4.2 rev 10587.
Expand All @@ -857,7 +866,6 @@ def solve(model):
).items():
model.solver.options[k] = v

# import pdb; pdb.set_trace()
model.solver_manager = SolverManagerFactory(model.options.solver_manager)

# get solver arguments
Expand Down Expand Up @@ -899,8 +907,9 @@ def solve(model):

TempfileManager.tempdir = model.options.tempdir

results = model.solver_manager.solve(model, opt=model.solver, **solver_args)
# import pdb; pdb.set_trace()
# results = model.solver_manager.solve(model, opt=model.solver, **solver_args)
results = SolverFactory("cbc").solve(model)#, **solver_args) #add the solver options parameters


if model.options.verbose:
print(
Expand Down