diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..910c588
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,47 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: Bug report
+labels: bug
+assignees: ahurli, BainanXia, danielolsen, jon-hagg, rouille
+
+---
+
+# :beetle:
+
+- [ ] I have checked that this issue has not already been reported.
+
+
+### Bug summary
+One or two sentences that succinctly describe the bug.
+
+### Code for reproduction
+A minimal code snippet required to reproduce the bug. Please make sure to minimize the
+number of dependencies required.
+```python
+# Paste your code here
+#
+#
+```
+
+### Actual outcome
+The output produced by the above code, which may be a screenshot, console output, etc.
+```shell
+# If applicable, paste the console output here
+#
+#
+```
+
+### Expected outcome
+A description of the expected outcome from the code snippet.
+
+### Environment
+Please specify your platform and versions of the relevant libraries you are using:
+* Operating system:
+* SwitchWrapper revision (run `git rev-parse origin/HEAD`):
+* Python version:
+* Jupyter version (if applicable):
+* Other libraries:
+
+### Additional context
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..eed0380
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,36 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: Feature request
+labels: feature request
+assignees: ahurli, BainanXia, danielolsen, jon-hagg, rouille
+
+---
+
+# :rocket:
+
+- [ ] Is your feature request essential for your project?
+
+
+### Describe the workflow you want to enable
+A clear and concise description of what can be enhanced, e.g., "I wish I could do [...]"
+
+### Describe your proposed implementation
+This should provide a description of the feature request, e.g.:
+* "The class `Foo` should have a new method `bar` that allows users to [...]"
+* "Function `foo` needs a new argument `bar` to set [...]"
+* "Create a new function `foo` to calculate [...]"
+
+If applicable, try to write a docstring for the desired feature. To illustrate, if you
+would like to add a new function in a module, provide:
+* the name of the function
+* a description of the task accomplished by the function
+* a list of the input and output parameters together with their types (e.g., `int`,
+  `str`, `pandas.DataFrame`, etc.) and a short description of their meaning
+
+### Describe alternatives you've considered, if relevant
+This should provide a description of any alternative solutions or features you've
+considered.
+
+### Additional context
+Add any other context or screenshots in this section, e.g., a plot from an article you
+believe would clearly communicate results.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000..85a1056
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,21 @@
+[Pull Request doc](https://breakthrough-energy.github.io/docs/user/git_guide.html#d-pull-request)
+
+### Purpose
+What is the larger goal of this change?
+
+### What the code is doing
+How is the purpose executed?
+
+### Testing
+How did you test this change (unit/functional testing, manual testing, etc.)?
+
+### Where to look
+* It's helpful to clarify where your new code lives if you moved files around or there could be confusion.
+
+* What files are most important?
+
+### Usage Example/Visuals
+How the code can be used and/or images of any graphs, tables or other visuals (not always applicable).
+
+### Time estimate
+How long will it take for reviewers and observers to understand this code change?
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 0000000..c588509
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,21 @@
+name: Lint
+
+on: push
+
+jobs:
+  formatting:
+    if: "!contains(github.event.head_commit.message, 'skip_ci')"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - run: python -m pip install --upgrade tox
+      - run: tox -e checkformatting
+  flake8:
+    if: "!contains(github.event.head_commit.message, 'skip_ci')"
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - run: python -m pip install --upgrade tox
+      - run: tox -e flake8
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..96e9652
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,25 @@
+name: Pytest
+
+on: [push]
+
+jobs:
+  build:
+    if: "!contains(github.event.head_commit.message, 'skip_ci')"
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [3.7, 3.8, 3.9]
+
+    steps:
+      - name: Checkout SwitchWrapper
+        uses: actions/checkout@v2
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - run: python -m pip install --upgrade tox
+      - run: tox -e pytest
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e521d97
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,139 @@
+# The remainder of this file is taken from github/gitignore
+# https://github.com/github/gitignore/blob/master/Python.gitignore
+
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Editors +.vscode/ +.idea/ + +# Mac/OSX +.DS_Store + +# Windows +Thumbs.db diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..64a840d --- /dev/null +++ b/Pipfile @@ -0,0 +1,14 @@ +[[source]] +name = "pypi" +url = "https://pypi.org/simple" +verify_ssl = true + +[dev-packages] +black = "*" +pytest = "*" + +[packages] +haversine = "~=2.3" +pandas = "~=1.2" +powersimdata = {editable = true, git = "https://github.com/Breakthrough-Energy/PowerSimData"} +switch-model = "==2.0.6" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..7258871 --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,434 @@ +{ + "_meta": { + "hash": { + "sha256": "a52fc31f82e08dfdf76593b601b6e866cf56c7bcdf9dbe1c78d86ac637cec173" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.8" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "appdirs": { + "hashes": [ + "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41", + "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128" + ], + "version": "==1.4.4" + }, + "atomicwrites": { + "hashes": [ + "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197", + "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a" + ], + "markers": "sys_platform == 'win32'", + "version": "==1.4.0" + }, + "attrs": { + "hashes": [ + "sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1", + "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==21.2.0" + }, + "bcrypt": { + "hashes": [ + "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29", + "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7", + "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34", + "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55", + "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6", + "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1", + "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d" + ], + "markers": "python_version >= '3.6'", + "version": "==3.2.0" + }, + "certifi": { + "hashes": [ + "sha256:1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c", + "sha256:719a74fb9e33b9bd44cc7f3a8d94bc35e4049deebe19ba7d8e108280cfd59830" + ], + "version": "==2020.12.5" + }, + "cffi": { + "hashes": [ + "sha256:005a36f41773e148deac64b08f233873a4d0c18b053d37da83f6af4d9087b813", + "sha256:0857f0ae312d855239a55c81ef453ee8fd24136eaba8e87a2eceba644c0d4c06", + "sha256:1071534bbbf8cbb31b498d5d9db0f274f2f7a865adca4ae429e147ba40f73dea", + "sha256:158d0d15119b4b7ff6b926536763dc0714313aa59e320ddf787502c70c4d4bee", + "sha256:1f436816fc868b098b0d63b8920de7d208c90a67212546d02f84fe78a9c26396", + "sha256:2894f2df484ff56d717bead0a5c2abb6b9d2bf26d6960c4604d5c48bbc30ee73", + "sha256:29314480e958fd8aab22e4a58b355b629c59bf5f2ac2492b61e3dc06d8c7a315", + 
"sha256:34eff4b97f3d982fb93e2831e6750127d1355a923ebaeeb565407b3d2f8d41a1", + "sha256:35f27e6eb43380fa080dccf676dece30bef72e4a67617ffda586641cd4508d49", + "sha256:3d3dd4c9e559eb172ecf00a2a7517e97d1e96de2a5e610bd9b68cea3925b4892", + "sha256:43e0b9d9e2c9e5d152946b9c5fe062c151614b262fda2e7b201204de0b99e482", + "sha256:48e1c69bbacfc3d932221851b39d49e81567a4d4aac3b21258d9c24578280058", + "sha256:51182f8927c5af975fece87b1b369f722c570fe169f9880764b1ee3bca8347b5", + "sha256:58e3f59d583d413809d60779492342801d6e82fefb89c86a38e040c16883be53", + "sha256:5de7970188bb46b7bf9858eb6890aad302577a5f6f75091fd7cdd3ef13ef3045", + "sha256:65fa59693c62cf06e45ddbb822165394a288edce9e276647f0046e1ec26920f3", + "sha256:69e395c24fc60aad6bb4fa7e583698ea6cc684648e1ffb7fe85e3c1ca131a7d5", + "sha256:6c97d7350133666fbb5cf4abdc1178c812cb205dc6f41d174a7b0f18fb93337e", + "sha256:6e4714cc64f474e4d6e37cfff31a814b509a35cb17de4fb1999907575684479c", + "sha256:72d8d3ef52c208ee1c7b2e341f7d71c6fd3157138abf1a95166e6165dd5d4369", + "sha256:8ae6299f6c68de06f136f1f9e69458eae58f1dacf10af5c17353eae03aa0d827", + "sha256:8b198cec6c72df5289c05b05b8b0969819783f9418e0409865dac47288d2a053", + "sha256:99cd03ae7988a93dd00bcd9d0b75e1f6c426063d6f03d2f90b89e29b25b82dfa", + "sha256:9cf8022fb8d07a97c178b02327b284521c7708d7c71a9c9c355c178ac4bbd3d4", + "sha256:9de2e279153a443c656f2defd67769e6d1e4163952b3c622dcea5b08a6405322", + "sha256:9e93e79c2551ff263400e1e4be085a1210e12073a31c2011dbbda14bda0c6132", + "sha256:9ff227395193126d82e60319a673a037d5de84633f11279e336f9c0f189ecc62", + "sha256:a465da611f6fa124963b91bf432d960a555563efe4ed1cc403ba5077b15370aa", + "sha256:ad17025d226ee5beec591b52800c11680fca3df50b8b29fe51d882576e039ee0", + "sha256:afb29c1ba2e5a3736f1c301d9d0abe3ec8b86957d04ddfa9d7a6a42b9367e396", + "sha256:b85eb46a81787c50650f2392b9b4ef23e1f126313b9e0e9013b35c15e4288e2e", + "sha256:bb89f306e5da99f4d922728ddcd6f7fcebb3241fc40edebcb7284d7514741991", + "sha256:cbde590d4faaa07c72bf979734738f328d239913ba3e043b1e98fe9a39f8b2b6", + "sha256:cd2868886d547469123fadc46eac7ea5253ea7fcb139f12e1dfc2bbd406427d1", + "sha256:d42b11d692e11b6634f7613ad8df5d6d5f8875f5d48939520d351007b3c13406", + "sha256:f2d45f97ab6bb54753eab54fffe75aaf3de4ff2341c9daee1987ee1837636f1d", + "sha256:fd78e5fee591709f32ef6edb9a015b4aa1a5022598e36227500c8f4e02328d9c" + ], + "version": "==1.14.5" + }, + "chardet": { + "hashes": [ + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==4.0.0" + }, + "colorama": { + "hashes": [ + "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b", + "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2" + ], + "markers": "sys_platform == 'win32'", + "version": "==0.4.4" + }, + "cryptography": { + "hashes": [ + "sha256:0f1212a66329c80d68aeeb39b8a16d54ef57071bf22ff4e521657b27372e327d", + "sha256:1e056c28420c072c5e3cb36e2b23ee55e260cb04eee08f702e0edfec3fb51959", + "sha256:240f5c21aef0b73f40bb9f78d2caff73186700bf1bc6b94285699aff98cc16c6", + "sha256:26965837447f9c82f1855e0bc8bc4fb910240b6e0d16a664bb722df3b5b06873", + "sha256:37340614f8a5d2fb9aeea67fd159bfe4f5f4ed535b1090ce8ec428b2f15a11f2", + "sha256:3d10de8116d25649631977cb37da6cbdd2d6fa0e0281d014a5b7d337255ca713", + "sha256:3d8427734c781ea5f1b41d6589c293089704d4759e34597dce91014ac125aad1", + "sha256:7ec5d3b029f5fa2b179325908b9cd93db28ab7b85bb6c1db56b10e0b54235177", 
+ "sha256:8e56e16617872b0957d1c9742a3f94b43533447fd78321514abbe7db216aa250", + "sha256:de4e5f7f68220d92b7637fc99847475b59154b7a1b3868fb7385337af54ac9ca", + "sha256:eb8cc2afe8b05acbd84a43905832ec78e7b3873fb124ca190f574dca7389a87d", + "sha256:ee77aa129f481be46f8d92a1a7db57269a2f23052d5f2433b4621bb457081cc9" + ], + "markers": "python_version >= '3.6'", + "version": "==3.4.7" + }, + "haversine": { + "hashes": [ + "sha256:75a7f859b3fb6df746564ca66ad1fd5b4052cdbab3d74ff16e8f1a7c3d4a26a5", + "sha256:f7e31d61d0ec4562a6fe1efe01304d8125ea27db2dd81b6757ae0974a5572842" + ], + "index": "pypi", + "version": "==2.3.1" + }, + "idna": { + "hashes": [ + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.10" + }, + "iniconfig": { + "hashes": [ + "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3", + "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32" + ], + "version": "==1.1.1" + }, + "nose": { + "hashes": [ + "sha256:9ff7c6cc443f8c51994b34a667bbcf45afd6d945be7477b52e97516fd17c53ac", + "sha256:dadcddc0aefbf99eea214e0f1232b94f2fa9bd98fa8353711dacb112bfcbbb2a", + "sha256:f1bffef9cbc82628f6e7d7b40d7e255aefaa1adb6a1b1d26c69a8b79e6208a98" + ], + "version": "==1.3.7" + }, + "numpy": { + "hashes": [ + "sha256:1676b0a292dd3c99e49305a16d7a9f42a4ab60ec522eac0d3dd20cdf362ac010", + "sha256:16f221035e8bd19b9dc9a57159e38d2dd060b48e93e1d843c49cb370b0f415fd", + "sha256:43909c8bb289c382170e0282158a38cf306a8ad2ff6dfadc447e90f9961bef43", + "sha256:4e465afc3b96dbc80cf4a5273e5e2b1e3451286361b4af70ce1adb2984d392f9", + "sha256:55b745fca0a5ab738647d0e4db099bd0a23279c32b31a783ad2ccea729e632df", + "sha256:5d050e1e4bc9ddb8656d7b4f414557720ddcca23a5b88dd7cff65e847864c400", + "sha256:637d827248f447e63585ca3f4a7d2dfaa882e094df6cfa177cc9cf9cd6cdf6d2", + "sha256:6690080810f77485667bfbff4f69d717c3be25e5b11bb2073e76bb3f578d99b4", + "sha256:66fbc6fed94a13b9801fb70b96ff30605ab0a123e775a5e7a26938b717c5d71a", + "sha256:67d44acb72c31a97a3d5d33d103ab06d8ac20770e1c5ad81bdb3f0c086a56cf6", + "sha256:6ca2b85a5997dabc38301a22ee43c82adcb53ff660b89ee88dded6b33687e1d8", + "sha256:6e51534e78d14b4a009a062641f465cfaba4fdcb046c3ac0b1f61dd97c861b1b", + "sha256:70eb5808127284c4e5c9e836208e09d685a7978b6a216db85960b1a112eeace8", + "sha256:830b044f4e64a76ba71448fce6e604c0fc47a0e54d8f6467be23749ac2cbd2fb", + "sha256:8b7bb4b9280da3b2856cb1fc425932f46fba609819ee1c62256f61799e6a51d2", + "sha256:a9c65473ebc342715cb2d7926ff1e202c26376c0dcaaee85a1fd4b8d8c1d3b2f", + "sha256:c1c09247ccea742525bdb5f4b5ceeacb34f95731647fe55774aa36557dbb5fa4", + "sha256:c5bf0e132acf7557fc9bb8ded8b53bbbbea8892f3c9a1738205878ca9434206a", + "sha256:db250fd3e90117e0312b611574cd1b3f78bec046783195075cbd7ba9c3d73f16", + "sha256:e515c9a93aebe27166ec9593411c58494fa98e5fcc219e47260d9ab8a1cc7f9f", + "sha256:e55185e51b18d788e49fe8305fd73ef4470596b33fc2c1ceb304566b99c71a69", + "sha256:ea9cff01e75a956dbee133fa8e5b68f2f92175233de2f88de3a682dd94deda65", + "sha256:f1452578d0516283c87608a5a5548b0cdde15b99650efdfd85182102ef7a7c17", + "sha256:f39a995e47cb8649673cfa0579fbdd1cdd33ea497d1728a6cb194d6252268e48" + ], + "markers": "python_version >= '3.7'", + "version": "==1.20.3" + }, + "packaging": { + "hashes": [ + "sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5", + "sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a" + 
], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==20.9" + }, + "pandas": { + "hashes": [ + "sha256:167693a80abc8eb28051fbd184c1b7afd13ce2c727a5af47b048f1ea3afefff4", + "sha256:2111c25e69fa9365ba80bbf4f959400054b2771ac5d041ed19415a8b488dc70a", + "sha256:298f0553fd3ba8e002c4070a723a59cdb28eda579f3e243bc2ee397773f5398b", + "sha256:2b063d41803b6a19703b845609c0b700913593de067b552a8b24dd8eeb8c9895", + "sha256:2cb7e8f4f152f27dc93f30b5c7a98f6c748601ea65da359af734dd0cf3fa733f", + "sha256:52d2472acbb8a56819a87aafdb8b5b6d2b3386e15c95bde56b281882529a7ded", + "sha256:612add929bf3ba9d27b436cc8853f5acc337242d6b584203f207e364bb46cb12", + "sha256:649ecab692fade3cbfcf967ff936496b0cfba0af00a55dfaacd82bdda5cb2279", + "sha256:68d7baa80c74aaacbed597265ca2308f017859123231542ff8a5266d489e1858", + "sha256:8d4c74177c26aadcfb4fd1de6c1c43c2bf822b3e0fc7a9b409eeaf84b3e92aaa", + "sha256:971e2a414fce20cc5331fe791153513d076814d30a60cd7348466943e6e909e4", + "sha256:9db70ffa8b280bb4de83f9739d514cd0735825e79eef3a61d312420b9f16b758", + "sha256:b730add5267f873b3383c18cac4df2527ac4f0f0eed1c6cf37fcb437e25cf558", + "sha256:bd659c11a4578af740782288cac141a322057a2e36920016e0fc7b25c5a4b686", + "sha256:c601c6fdebc729df4438ec1f62275d6136a0dd14d332fc0e8ce3f7d2aadb4dd6", + "sha256:d0877407359811f7b853b548a614aacd7dea83b0c0c84620a9a643f180060950" + ], + "index": "pypi", + "version": "==1.2.4" + }, + "paramiko": { + "hashes": [ + "sha256:4f3e316fef2ac628b05097a637af35685183111d4bc1b5979bd397c2ab7b5898", + "sha256:7f36f4ba2c0d81d219f4595e35f70d56cc94f9ac40a6acdf51d6ca210ce65035" + ], + "version": "==2.7.2" + }, + "pint": { + "hashes": [ + "sha256:6593c5dfaf2f701c54f17453191dff05e90ec9ebc3d1901468a59cfcb3289a4c", + "sha256:f4d0caa713239e6847a7c6eefe2427358566451fe56497d533f21fb590a3f313" + ], + "markers": "python_version >= '3.6'", + "version": "==0.17" + }, + "pluggy": { + "hashes": [ + "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", + "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.13.1" + }, + "ply": { + "hashes": [ + "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3", + "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce" + ], + "version": "==3.11" + }, + "powersimdata": { + "editable": true, + "git": "https://github.com/Breakthrough-Energy/PowerSimData", + "ref": "d60d7078a0ea6f3dbb787ffa71314c41c734f1a6" + }, + "py": { + "hashes": [ + "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", + "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.10.0" + }, + "pycparser": { + "hashes": [ + "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", + "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.20" + }, + "pynacl": { + "hashes": [ + "sha256:06cbb4d9b2c4bd3c8dc0d267416aaed79906e7b33f114ddbf0911969794b1cc4", + "sha256:11335f09060af52c97137d4ac54285bcb7df0cef29014a1a4efe64ac065434c4", + "sha256:2fe0fc5a2480361dcaf4e6e7cea00e078fcda07ba45f811b167e3f99e8cff574", + "sha256:30f9b96db44e09b3304f9ea95079b1b7316b2b4f3744fe3aaecccd95d547063d", + 
"sha256:4e10569f8cbed81cb7526ae137049759d2a8d57726d52c1a000a3ce366779634", + "sha256:511d269ee845037b95c9781aa702f90ccc36036f95d0f31373a6a79bd8242e25", + "sha256:537a7ccbea22905a0ab36ea58577b39d1fa9b1884869d173b5cf111f006f689f", + "sha256:54e9a2c849c742006516ad56a88f5c74bf2ce92c9f67435187c3c5953b346505", + "sha256:757250ddb3bff1eecd7e41e65f7f833a8405fede0194319f87899690624f2122", + "sha256:7757ae33dae81c300487591c68790dfb5145c7d03324000433d9a2c141f82af7", + "sha256:7c6092102219f59ff29788860ccb021e80fffd953920c4a8653889c029b2d420", + "sha256:8122ba5f2a2169ca5da936b2e5a511740ffb73979381b4229d9188f6dcb22f1f", + "sha256:9c4a7ea4fb81536c1b1f5cc44d54a296f96ae78c1ebd2311bd0b60be45a48d96", + "sha256:c914f78da4953b33d4685e3cdc7ce63401247a21425c16a39760e282075ac4a6", + "sha256:cd401ccbc2a249a47a3a1724c2918fcd04be1f7b54eb2a5a71ff915db0ac51c6", + "sha256:d452a6746f0a7e11121e64625109bc4468fc3100452817001dbe018bb8b08514", + "sha256:ea6841bc3a76fa4942ce00f3bda7d436fda21e2d91602b9e21b7ca9ecab8f3ff", + "sha256:f8851ab9041756003119368c1e6cd0b9c631f46d686b3904b18c0139f4419f80" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.4.0" + }, + "pyomo": { + "hashes": [ + "sha256:238bc5cacfaa5ad8cf77dcafcc3dd96370cf938ad62f3783748fd0d0ad156773", + "sha256:28cbe034b06a477053616a3ce5ef43149bfd7d025cac490c2a3dd006c388b60d", + "sha256:cba1c1598f07c6ed0755df7db61d09a46065d576419c0f1863d60d17f690515a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==5.6.8" + }, + "pyparsing": { + "hashes": [ + "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", + "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "version": "==2.4.7" + }, + "pytest": { + "hashes": [ + "sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b", + "sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890" + ], + "index": "pypi", + "version": "==6.2.4" + }, + "python-dateutil": { + "hashes": [ + "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", + "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==2.8.1" + }, + "pytz": { + "hashes": [ + "sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da", + "sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798" + ], + "version": "==2021.1" + }, + "pyutilib": { + "hashes": [ + "sha256:38c552dea3f7b4c8dd7ce1741f7a63de720e99b44d9eb08932431d2ee4bcfb05", + "sha256:b253eca421c165ed9586b7e7b05c7a392f923dbe4b945cefef470b6c497f6bf3" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==5.7.3" + }, + "requests": { + "hashes": [ + "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", + "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==2.25.1" + }, + "scipy": { + "hashes": [ + "sha256:01b38dec7e9f897d4db04f8de4e20f0f5be3feac98468188a0f47a991b796055", + "sha256:10dbcc7de03b8d635a1031cb18fd3eaa997969b64fdf78f99f19ac163a825445", + "sha256:19aeac1ad3e57338723f4657ac8520f41714804568f2e30bd547d684d72c392e", + 
"sha256:1b21c6e0dc97b1762590b70dee0daddb291271be0580384d39f02c480b78290a", + "sha256:1caade0ede6967cc675e235c41451f9fb89ae34319ddf4740194094ab736b88d", + "sha256:23995dfcf269ec3735e5a8c80cfceaf384369a47699df111a6246b83a55da582", + "sha256:2a799714bf1f791fb2650d73222b248d18d53fd40d6af2df2c898db048189606", + "sha256:3274ce145b5dc416c49c0cf8b6119f787f0965cd35e22058fe1932c09fe15d77", + "sha256:33d1677d46111cfa1c84b87472a0274dde9ef4a7ef2e1f155f012f5f1e995d8f", + "sha256:44d452850f77e65e25b1eb1ac01e25770323a782bfe3a1a3e43847ad4266d93d", + "sha256:9e3302149a369697c6aaea18b430b216e3c88f9a61b62869f6104881e5f9ef85", + "sha256:a75b014d3294fce26852a9d04ea27b5671d86736beb34acdfc05859246260707", + "sha256:ad7269254de06743fb4768f658753de47d8b54e4672c5ebe8612a007a088bd48", + "sha256:b30280fbc1fd8082ac822994a98632111810311a9ece71a0e48f739df3c555a2", + "sha256:b79104878003487e2b4639a20b9092b02e1bad07fc4cf924b495cf413748a777", + "sha256:d449d40e830366b4c612692ad19fbebb722b6b847f78a7b701b1e0d6cda3cc13", + "sha256:d647757373985207af3343301d89fe738d5a294435a4f2aafb04c13b4388c896", + "sha256:f68eb46b86b2c246af99fcaa6f6e37c7a7a413e1084a794990b877f2ff71f7b6", + "sha256:fdf606341cd798530b05705c87779606fcdfaf768a8129c348ea94441da15b04" + ], + "markers": "python_version < '3.10' and python_version >= '3.7'", + "version": "==1.6.3" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==1.16.0" + }, + "switch-model": { + "hashes": [ + "sha256:179919ea051821be2e8369a7ea978a92341e8969c47962ab3cf84e55a73ff68b", + "sha256:37858ce3d5caa3e6fb91959d3548c0da5db2fff4e6d103a8ff3925b65f747a7e" + ], + "index": "pypi", + "version": "==2.0.6" + }, + "testfixtures": { + "hashes": [ + "sha256:5ec3a0dd6f71cc4c304fbc024a10cc293d3e0b852c868014b9f233203e149bda", + "sha256:9ed31e83f59619e2fa17df053b241e16e0608f4580f7b5a9333a0c9bdcc99137" + ], + "version": "==6.17.1" + }, + "toml": { + "hashes": [ + "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", + "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2'", + "version": "==0.10.2" + }, + "tqdm": { + "hashes": [ + "sha256:daec693491c52e9498632dfbe9ccfc4882a557f5fa08982db1b4d3adbe0887c3", + "sha256:ebdebdb95e3477ceea267decfc0784859aa3df3e27e22d23b83e9b272bf157ae" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==4.60.0" + }, + "urllib3": { + "hashes": [ + "sha256:2f4da4594db7e1e110a944bb1b551fdf4e6c136ad42e4234131391e21eb5b0df", + "sha256:e7b021f7241115872f92f43c6508082facffbd1c048e3c6e2bb9c2a157e28937" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", + "version": "==1.26.4" + } + }, + "develop": {} +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..dfb3e0f --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +haversine~=2.3 +pandas~=1.2 +-e git+https://github.com/Breakthrough-Energy/PowerSimData#egg=PowerSimData +pytest +switch-model==2.0.6 diff --git a/switchwrapper/__init__.py b/switchwrapper/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/switchwrapper/call.py b/switchwrapper/call.py new file mode 100644 index 0000000..c193faa --- /dev/null +++ 
b/switchwrapper/call.py
@@ -0,0 +1,60 @@
+import os
+import subprocess
+import sys
+
+
+def launch_switch(input_folder, solver="gurobi", suffixes="dual", verbose=True):
+    """Launch Switch using a folder of prepared input files.
+
+    :param str input_folder: location containing the 'modules.txt' file and a subfolder
+        'inputs' containing input CSVs.
+    :param str solver: the solver for Switch to use. If None, use GLPK (Switch default).
+    :param str suffixes: which additional information for Pyomo to collect from the
+        solver. If None, use the Switch default (none).
+    :param bool verbose: whether to pass the '--verbose' flag to Switch, to print more
+        information about the process of building and solving the model.
+    :raises TypeError: if ``input_folder`` is not a str, if ``solver`` or ``suffixes``
+        are not str/None, or if ``verbose`` is not bool.
+    :raises ValueError: if ``input_folder`` is not a valid directory, or if it is
+        missing a sub-directory named 'inputs' or a file named 'modules.txt'.
+    """
+    # Validate inputs
+    if not isinstance(input_folder, str):
+        raise TypeError(f"input_folder must be a str, got {type(input_folder)}")
+    if not os.path.isdir(input_folder):
+        abspath = os.path.abspath(input_folder)
+        raise ValueError(f"input_folder must be a valid directory, got {abspath}")
+    inputs_subfolder = os.path.join(input_folder, "inputs")
+    if not os.path.isdir(inputs_subfolder):
+        raise ValueError("input_folder must contain a subdirectory named 'inputs'")
+    modules_filepath = os.path.join(input_folder, "modules.txt")
+    if not os.path.isfile(modules_filepath):
+        raise ValueError("input_folder must contain a file named 'modules.txt'")
+    if not isinstance(solver, str) and solver is not None:
+        raise TypeError("solver must be a str or None")
+    if not isinstance(suffixes, str) and suffixes is not None:
+        raise TypeError("suffixes must be a str or None")
+    if not isinstance(verbose, bool):
+        raise TypeError("verbose must be bool")
+
+    # Construct subprocess call, starting with user-provided inputs
+    cmd = ["switch", "solve"]
+    if solver is not None:
+        cmd += ["--solver", solver]
+    if suffixes is not None:
+        cmd += ["--suffixes", suffixes]
+    if verbose:
+        cmd += ["--verbose"]
+
+    # Then add inferred inputs about the folders
+    outputs_subfolder = os.path.join(input_folder, "outputs")
+    cmd += ["--inputs-dir", inputs_subfolder]
+    cmd += ["--module-list", modules_filepath]
+    cmd += ["--outputs-dir", outputs_subfolder]
+
+    # Finally, launch
+    subprocess.run(cmd)
+
+
+if __name__ == "__main__":
+    launch_switch(sys.argv[1])
diff --git a/switchwrapper/const.py b/switchwrapper/const.py
new file mode 100644
index 0000000..4a6a586
--- /dev/null
+++ b/switchwrapper/const.py
@@ -0,0 +1,102 @@
+switch_modules = [
+    "switch_model",
+    "switch_model.timescales",
+    "switch_model.financials",
+    "switch_model.balancing.load_zones",
+    "switch_model.energy_sources.properties",
+    "switch_model.generators.core.build",
+    "switch_model.generators.core.dispatch",
+    "switch_model.generators.core.no_commit",
+    "switch_model.energy_sources.fuel_costs.simple",
+    "switch_model.transmission.local_td",
+    "switch_model.transmission.transport.build",
+    "switch_model.transmission.transport.dispatch",
+    "switch_model.reporting",
+]
+
+financial_parameters = {
+    "discount_rate": 0.079,
+    "interest_rate": 0.029,
+}
+
+fuels = ["Coal", "NaturalGas", "Uranium", "Petroleum", "Other"]
+
+fuel_mapping = {
+    "wind": "Wind",
+    "wind_offshore": "Wind",
+    "solar": "Solar",
+    "hydro": "Water",
+    "geothermal": "Geothermal",
+    "coal":
"Coal", + "ng": "NaturalGas", + "nuclear": "Uranium", + "dfo": "Petroleum", + "other": "Other", +} + +load_parameters = { + "existing_local_td": 99999, + "local_td_annual_cost_per_mw": 0, +} + +transmission_parameters = { + "trans_capital_cost_per_mw_km": 621, + "trans_lifetime_yrs": 40, + "trans_fixed_om_fraction": 0, + "distribution_loss_rate": 0, +} + +non_fuels = [ + "Wind", + "Solar", + "Water", + "Geothermal", +] + +investment_costs_by_type = { + "coal": 5.255e6, + "dfo": 1e7, + "geothermal": 7.696e6, + "hydro": 5.983e6, + "ng": 1.616e6, + "nuclear": 7.186e6, + "other": 1e7, + "solar": 1.6e6, + "wind": 1.956e6, + "wind_offshore": 5.342e6, +} + +assumed_pmins = { + "coal": None, + "default": 0, + "geothermal": 0.95, + "nuclear": 0.95, +} + +assumed_branch_efficiencies = { + 115: 0.9, + 138: 0.94, + 161: 0.96, + 230: 0.97, + 345: 0.98, + 500: 0.99, + 765: 0.99, + "default": 0.99, +} + +assumed_capacity_limits = { + "coal": 0, + "default": 5000, +} + +assumed_fuel_share_of_gencost = 0.7 + +assumed_ages_by_type = { + "hydro": 60, + "coal": 40, + "nuclear": 40, + "default": 20, +} + +baseload_types = {"coal", "nuclear"} +variable_types = {"hydro", "solar", "wind", "wind_offshore"} diff --git a/switchwrapper/grid_to_switch.py b/switchwrapper/grid_to_switch.py new file mode 100644 index 0000000..cf78e86 --- /dev/null +++ b/switchwrapper/grid_to_switch.py @@ -0,0 +1,509 @@ +import os + +import pandas as pd +from haversine import haversine + +from switchwrapper import const +from switchwrapper.helpers import make_indices + + +def grid_to_switch(grid, output_folder): + """Convert relevant data from a Grid object and command-line-prompted user inputs + to CSVs for use with Switch. + + :param powersimdata.input.grid.Grid grid: grid instance. + :param str output_folder: the location to save outputs, created as necessary. 
+    """
+    # First, prompt the user for information not contained in const or the passed grid
+    base_year = get_base_year()
+    inv_period, period_start, period_end = get_inv_periods()
+
+    # Then, calculate information which feeds multiple data frames
+    cost_at_min_power, single_segment_slope = linearize_gencost(grid)
+    average_fuel_cost = calculate_average_fuel_cost(grid.plant)
+
+    # Finally, generate and save data frames to CSVs
+    financials_filepath = os.path.join(output_folder, "financials.csv")
+    build_financials(base_year).to_csv(financials_filepath, index=False)
+
+    fuels_filepath = os.path.join(output_folder, "fuels.csv")
+    build_fuels().to_csv(fuels_filepath, index=False)
+
+    fuel_cost_filepath = os.path.join(output_folder, "fuel_cost.csv")
+    fuel_cost = build_fuel_cost(average_fuel_cost, base_year, inv_period)
+    fuel_cost.to_csv(fuel_cost_filepath, index=False)
+
+    generation_projects_info_filepath = os.path.join(
+        output_folder, "generation_projects_info.csv"
+    )
+    generation_project_info = build_generation_projects_info(
+        grid.plant, single_segment_slope, average_fuel_cost
+    )
+    generation_project_info.to_csv(generation_projects_info_filepath, index=False)
+
+    gen_build_costs_filepath = os.path.join(output_folder, "gen_build_costs.csv")
+    gen_build_costs = build_gen_build_costs(grid.plant, cost_at_min_power, inv_period)
+    gen_build_costs.to_csv(gen_build_costs_filepath, index=False)
+
+    gen_build_predetermined_filepath = os.path.join(
+        output_folder, "gen_build_predetermined.csv"
+    )
+    build_gen_build_predetermined(grid.plant).to_csv(
+        gen_build_predetermined_filepath, index=False
+    )
+
+    load_zones_filepath = os.path.join(output_folder, "load_zones.csv")
+    build_load_zones(grid.bus).to_csv(load_zones_filepath, index=False)
+
+    non_fuel_energy_source_filepath = os.path.join(
+        output_folder, "non_fuel_energy_sources.csv"
+    )
+    build_non_fuel_energy_source().to_csv(non_fuel_energy_source_filepath, index=False)
+
+    periods_filepath = os.path.join(output_folder, "periods.csv")
+    build_periods(inv_period, period_start, period_end).to_csv(
+        periods_filepath, index=False
+    )
+
+    transmission_lines_filepath = os.path.join(output_folder, "transmission_lines.csv")
+    build_transmission_lines(grid).to_csv(transmission_lines_filepath, index=False)
+
+    trans_params_filepath = os.path.join(output_folder, "trans_params.csv")
+    build_trans_params().to_csv(trans_params_filepath, index=False)
+
+
+def get_base_year():
+    """Prompt the user for a base year.
+
+    :return: (*int*) -- base year.
+    """
+    year = input("Please enter base study year (normally PowerSimData scenario year): ")
+    return int(year)
+
+
+def get_inv_periods():
+    """Prompt the user for the number of investment stages, the investment period
+    years, and the start and end years of each period.
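+
+    Example session (illustrative values)::
+
+        Please enter the number of investment stages: 2
+        Please enter investment period years, separated by spaces: 2030 2040
+        Please enter start year for each period, separated by spaces: 2025 2035
+        Please enter end year for each period, separated by spaces: 2034 2044
+
+    This returns ([2030, 2040], [2025, 2035], [2034, 2044]).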
+
+    :return: (*tuple*) -- 3-tuple of lists: investment periods, start years, end years.
+    """
+    while True:
+        num_inv_stages = input("Please enter the number of investment stages: ")
+        if not num_inv_stages.isdigit():
+            print("The number of investment stages must be an integer, please re-enter.")
+        else:
+            num_inv_stages = int(num_inv_stages)
+            break
+    if num_inv_stages == 1:
+        print("Single-stage expansion identified.")
+    else:
+        print("Multi-stage expansion identified.")
+
+    while True:
+        inv_period = input(
+            "Please enter investment period years, separated by spaces: "
+        ).split()
+        if len(inv_period) == num_inv_stages:
+            try:
+                inv_period = [int(i) for i in inv_period]
+                break
+            except ValueError:
+                print("All investment period years must be integers, please re-enter.")
+                continue
+        print(
+            "The number of investment period years must match the number of "
+            "investment stages, please re-enter."
+        )
+
+    while True:
+        period_start = input(
+            "Please enter start year for each period, separated by spaces: "
+        ).split()
+        if len(period_start) == num_inv_stages:
+            try:
+                period_start = [int(p) for p in period_start]
+                break
+            except ValueError:
+                print("All start years must be integers, please re-enter.")
+                continue
+        print(
+            "The number of start years must match the number of investment stages, "
+            "please re-enter."
+        )
+
+    while True:
+        period_end = input(
+            "Please enter end year for each period, separated by spaces: "
+        ).split()
+        if len(period_end) == num_inv_stages:
+            try:
+                period_end = [int(p) for p in period_end]
+                break
+            except ValueError:
+                print("All end years must be integers, please re-enter.")
+                continue
+        print(
+            "The number of end years must match the number of investment stages, "
+            "please re-enter."
+        )
+    return inv_period, period_start, period_end
+
+
+def calculate_average_fuel_cost(plant):
+    """Calculate average fuel cost by bus_id, for buses containing generators.
+
+    :param pandas.DataFrame plant: plant data from a Grid object.
+    :return: (*pandas.DataFrame*) -- data frame of average fuel cost by bus_id.
+    """
+    plant_mod = plant.copy()
+    # Map our generator types to Switch fuel types
+    plant_mod["fuel"] = plant_mod["type"].map(const.fuel_mapping)
+    # Calculate the average fuel cost for each (bus_id, fuel)
+    relevant_fuel_columns = ["bus_id", "fuel", "GenFuelCost"]
+    fuel_cost = plant_mod[relevant_fuel_columns].groupby(["bus_id", "fuel"]).mean()
+    return fuel_cost
+
+
+def linearize_gencost(grid):
+    """Calculate linearized cost parameters, incorporating assumed minimum generation.
+
+    :param powersimdata.input.grid.Grid grid: grid instance.
+    :return: (*tuple*) -- two pandas Series objects, indexed by plant ID within ``grid``:
+        the first is the cost of running each generator at minimum generation;
+        the second is the single-segment linearized slope of each generator's cost curve.
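+
+    For a quadratic cost curve f(P) = c0 + c1 * P + c2 * P**2, with Pmin adjusted
+    per the assumed minimum-generation fractions in const.assumed_pmins, these are
+    f(Pmin) and (f(Pmax) - f(Pmin)) / (Pmax - Pmin), respectively.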
+    """
+    plant_mod = grid.plant.copy()
+    plant_mod.Pmin = plant_mod.apply(
+        lambda x: x.Pmax
+        * const.assumed_pmins.get(x.type, const.assumed_pmins["default"])
+        if const.assumed_pmins.get(x.type, const.assumed_pmins["default"]) is not None
+        else x.Pmin,
+        axis=1,
+    )
+    gencost = grid.gencost["before"]
+    cost_at_min_power = (
+        gencost.c0 + gencost.c1 * plant_mod.Pmin + gencost.c2 * plant_mod.Pmin ** 2
+    )
+    cost_at_max_power = (
+        gencost.c0 + gencost.c1 * plant_mod.Pmax + gencost.c2 * plant_mod.Pmax ** 2
+    )
+    single_segment_slope = (cost_at_max_power - cost_at_min_power) / (
+        plant_mod.Pmax - plant_mod.Pmin
+    )
+    single_segment_slope.fillna(0, inplace=True)
+    return cost_at_min_power, single_segment_slope
+
+
+def build_financials(base_year):
+    """Parse financial parameter constants and base year input to a data frame.
+
+    :param int/str base_year: information to be added in the 'base_financial_year'
+        column.
+    :return: (*pandas.DataFrame*) -- single-row data frame with all params.
+    """
+    financials = pd.DataFrame([const.financial_parameters])
+    financials.insert(0, "base_financial_year", base_year)
+    return financials
+
+
+def build_fuels():
+    """Parse set of fuels to a data frame.
+
+    :return: (*pandas.DataFrame*) -- data frame with one row per fuel and constant
+        co2-intensity columns.
+    """
+    fuels = pd.DataFrame({"fuel": const.fuels})
+    fuels["co2_intensity"] = "0"
+    fuels["upstream_co2_intensity"] = "."
+    return fuels
+
+
+def build_fuel_cost(average_fuel_cost, base_year, inv_period):
+    """Create a data frame of average fuel costs by zone and fuel, and project these
+    costs to future years.
+
+    :param pandas.DataFrame average_fuel_cost: average fuel cost by bus_id.
+    :param int base_year: base year, used to project fuel costs to future years.
+    :param list inv_period: list of investment period years, as integers.
+    :return: (*pandas.DataFrame*) -- data frame of fuel costs by period, zone, and fuel.
+    """
+    fuel_cost = average_fuel_cost.copy()
+    # Retrieve the original `bus_id` and `fuel` columns, rename `bus_id` to `load_zone`
+    fuel_cost.reset_index(inplace=True)
+    fuel_cost.rename(columns={"bus_id": "load_zone"}, inplace=True)
+    # Duplicate each row N times, where N is the number of investment years
+    original_fuel_cost_length = len(fuel_cost)
+    fuel_cost = fuel_cost.loc[fuel_cost.index.repeat(len(inv_period))]
+    # Fill in different years and inflation values for the repeated rows
+    fuel_cost["period"] = inv_period * original_fuel_cost_length
+    inflation_factors = [
+        (1 + const.financial_parameters["interest_rate"]) ** (year - base_year)
+        for year in inv_period
+    ]
+    fuel_cost["inflation"] = inflation_factors * original_fuel_cost_length
+    # Use inflation values to calculate future fuel costs
+    fuel_cost["fuel_cost"] = fuel_cost["GenFuelCost"] * fuel_cost["inflation"]
+    fuel_cost["fuel_cost"] = fuel_cost["fuel_cost"].round(2)
+    # Clean up columns we don't need
+    fuel_cost.drop(columns=["GenFuelCost", "inflation"], inplace=True)
+    # Clean up any rows we don't need
+    fuel_cost = fuel_cost.query("fuel_cost > 0 and fuel in @const.fuels")
+
+    return fuel_cost
+
+
+def build_generation_projects_info(plant, single_segment_slope, average_fuel_cost):
+    """Build data frame for generation_projects_info.
+
+    :param pandas.DataFrame plant: data frame of current generators.
+    :param pandas.Series single_segment_slope: single-segment linearized slope of each
+        generator's cost curve, from :func:`linearize_gencost`.
+    :param pandas.DataFrame average_fuel_cost: average fuel cost by bus_id, from
+        :func:`calculate_average_fuel_cost`.
+        This is single-column ("GenFuelCost") and multi-index ("bus_id", "fuel").
+ :return: (*pandas.DataFrame*) -- data frame of generation project info. + """ + # Extract information from inputs + original_plant_indices, hypothetical_plant_indices = make_indices(plant.index) + all_plant_indices = original_plant_indices + hypothetical_plant_indices + + # Use inputs for intermediate calculations + fuel_gencost = single_segment_slope * const.assumed_fuel_share_of_gencost + nonfuel_gencost = single_segment_slope * (1 - const.assumed_fuel_share_of_gencost) + fuel_cost_per_generator = plant.apply( + lambda x: average_fuel_cost.loc[ + (x.bus_id, const.fuel_mapping[x.type]), "GenFuelCost" + ], + axis=1, + ) + estimated_heatrate = (fuel_gencost / fuel_cost_per_generator).fillna(0) + + # Finally, construct data frame and return + df = pd.DataFrame(index=pd.Index(all_plant_indices, name="GENERATION_PROJECT")) + df["gen_tech"] = plant.type.tolist() * 2 + df["gen_load_zone"] = plant.bus_id.tolist() * 2 + df["gen_connect_cost_per_mw"] = 0 + df["gen_capacity_limit_mw"] = "." + df.loc[hypothetical_plant_indices, "gen_capacity_limit_mw"] = [ + const.assumed_capacity_limits.get(t, const.assumed_capacity_limits["default"]) + for t in plant.type.tolist() + ] + df["gen_full_load_heat_rate"] = estimated_heatrate.tolist() * 2 + df["gen_variable_om"] = nonfuel_gencost.tolist() * 2 + df["gen_max_age"] = [ + const.assumed_ages_by_type.get(t, const.assumed_ages_by_type["default"]) + for t in plant.type.tolist() * 2 + ] + df["gen_min_build_capacity"] = 0 + df["gen_scheduled_outage_rate"] = 0 + df["gen_forced_outage_rate"] = 0 + df["gen_is_variable"] = list(plant.type.isin(const.variable_types).astype(int)) * 2 + df["gen_is_baseload"] = list(plant.type.isin(const.baseload_types).astype(int)) * 2 + df["gen_is_cogen"] = 0 + df["gen_energy_source"] = plant.type.map(const.fuel_mapping).tolist() * 2 + df.loc[df.gen_energy_source.isin(const.non_fuels), "gen_full_load_heat_rate"] = "." + df["gen_unit_size"] = "." + df["gen_ccs_capture_efficiency"] = "." + df["gen_ccs_energy_load"] = "." + df["gen_storage_efficiency"] = "." + df["gen_store_to_release_ratio"] = "." + df.reset_index(inplace=True) + + return df + + +def build_gen_build_costs(plant, cost_at_min_power, inv_period): + """Build a data frame of generation projects, both existing and hypothetical. + + :param pandas.DataFrame plant: data frame of current generators. + :param pandas.Series cost_at_min_power: cost of running generator at minimum power. + :param list inv_period: list of investment period years. + :return: (*pandas.DataFrame*) -- data frame of existing and hypothetical generators. 
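+
+    The result contains one row per existing plant (with build_year 2019) plus one
+    row per hypothetical plant for each investment period.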
+    """
+    # Build lists for each column, which apply to one year
+    original_plant_indices, hypothetical_plant_indices = make_indices(plant.index)
+    overnight_costs = plant["type"].map(const.investment_costs_by_type).tolist()
+    gen_fixed_om = (cost_at_min_power / plant.Pmax).fillna(0.0).tolist()
+
+    # Extend these lists to multiple years
+    build_years = [2019] + inv_period
+    plant_index_lists = [original_plant_indices] + [
+        hypothetical_plant_indices for i in inv_period
+    ]
+    all_indices = sum(plant_index_lists, [])
+    all_build_years = sum([[b] * len(original_plant_indices) for b in build_years], [])
+    all_overnight_costs = sum([overnight_costs for b in build_years], [])
+    all_gen_fixed_om = sum([gen_fixed_om for b in build_years], [])
+
+    # Create a dataframe from the collected lists
+    gen_build_costs = pd.DataFrame(
+        {
+            "GENERATION_PROJECT": all_indices,
+            "build_year": all_build_years,
+            "gen_overnight_cost": all_overnight_costs,
+            "gen_fixed_om": all_gen_fixed_om,
+        }
+    )
+    return gen_build_costs
+
+
+def build_gen_build_predetermined(plant):
+    """Build a data frame of generator capacity and build year.
+
+    :param pandas.DataFrame plant: data frame of generators in a grid instance.
+    :return: (*pandas.DataFrame*) -- data frame of existing generators.
+    """
+    gen_build_predetermined = plant["Pmax"].reset_index()
+    gen_build_predetermined["build_year"] = 2019
+    gen_build_predetermined.rename(
+        columns={
+            "plant_id": "GENERATION_PROJECT",
+            "Pmax": "gen_predetermined_cap",
+        },
+        inplace=True,
+    )
+    original_plant_indices, _ = make_indices(plant.index)
+    gen_build_predetermined["GENERATION_PROJECT"] = original_plant_indices
+    gen_build_predetermined = gen_build_predetermined[
+        ["GENERATION_PROJECT", "build_year", "gen_predetermined_cap"]
+    ]
+    return gen_build_predetermined
+
+
+def build_load_zones(bus):
+    """Parse bus data frame and load zone constants to a data frame.
+
+    :param pandas.DataFrame bus: bus data from a Grid object.
+    :return: (*pandas.DataFrame*) -- data frame with constants added to bus indices.
+    """
+    load_zones = bus.index.to_frame()
+    load_zones["dbid"] = range(1, len(load_zones) + 1)
+    for k, v in const.load_parameters.items():
+        load_zones[k] = v
+    load_zones.rename(columns={"bus_id": "LOAD_ZONE"}, inplace=True)
+    return load_zones
+
+
+def build_non_fuel_energy_source():
+    """Parse the list of non-fuel energy sources to a data frame.
+
+    :return: (*pandas.DataFrame*) -- single-column data frame with non-fuel energy
+        sources.
+    """
+    non_fuel_energy_source = pd.DataFrame({"energy_source": const.non_fuels})
+    return non_fuel_energy_source
+
+
+def build_periods(inv_period, period_start, period_end):
+    """Parse user-input investment period information into a data frame.
+
+    :param list inv_period: list of investment period years, as integers.
+    :param list period_start: list of start years, as integers, for each period.
+    :param list period_end: list of end years, as integers, for each period.
+    :return: (*pandas.DataFrame*) -- periods data frame with investment period
+        information.
+    """
+    periods = pd.DataFrame(columns=["INVESTMENT_PERIOD", "period_start", "period_end"])
+    periods["INVESTMENT_PERIOD"] = inv_period
+    periods["period_start"] = period_start
+    periods["period_end"] = period_end
+    return periods
+
+
+def branch_efficiency(from_bus_voltage, to_bus_voltage):
+    """Calculate branch efficiency based on start and end bus baseKV.
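+
+    For example, under the assumptions in const.assumed_branch_efficiencies, two
+    345 kV buses yield 0.98, while buses at different voltages fall back to the
+    default of 0.99.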
+ + :param int/float from_bus_voltage: start bus baseKV + :param int/float to_bus_voltage: end bus baseKV + :return: (*float*) -- efficiency rate of a branch + """ + if from_bus_voltage == to_bus_voltage: + return const.assumed_branch_efficiencies.get( + from_bus_voltage, const.assumed_branch_efficiencies["default"] + ) + else: + return const.assumed_branch_efficiencies["default"] + + +def build_aclines(grid): + """Create a data frame for ac transmission lines with required columns for + :func:`build_transmission_lines`. + + :param powersimdata.input.grid.Grid grid: grid instance + :return: (*pandas.DataFrame*) -- ac transmission line data frame + """ + acline = grid.branch[["from_bus_id", "to_bus_id", "rateA"]].reset_index() + acline["trans_length_km"] = list( + map( + haversine, + grid.bus.loc[acline["from_bus_id"], ["lat", "lon"]].values, + grid.bus.loc[acline["to_bus_id"], ["lat", "lon"]].values, + ) + ) + acline["trans_efficiency"] = list( + map( + branch_efficiency, + grid.bus.loc[acline["from_bus_id"], "baseKV"], + grid.bus.loc[acline["to_bus_id"], "baseKV"], + ) + ) + acline["branch_id"] = acline["branch_id"].apply(lambda x: str(x) + "ac") + return acline.round(2) + + +def build_dclines(grid): + """Create a data frame for dc transmission lines with required columns for + :func:`build_transmission_lines`. + + :param powersimdata.input.grid.Grid grid: grid instance + :return: (*pandas.DataFrame*) -- dc transmission line data frame + """ + dcline = grid.dcline[["from_bus_id", "to_bus_id", "Pmax"]].reset_index() + dcline["trans_length_km"] = list( + map( + haversine, + grid.bus.loc[dcline["from_bus_id"], ["lat", "lon"]].values, + grid.bus.loc[dcline["to_bus_id"], ["lat", "lon"]].values, + ) + ) + dcline["trans_efficiency"] = 0.99 + dcline["dcline_id"] = dcline["dcline_id"].apply(lambda x: str(x) + "dc") + dcline.rename(columns={"dcline_id": "branch_id", "Pmax": "rateA"}, inplace=True) + return dcline.round(2) + + +def build_transmission_lines(grid): + """Parse branch and dcline data frames of a grid instance into a transmission + line data frame with new columns for length and efficiency. + + :param powersimdata.input.grid.Grid grid: grid instance + :return: (*pandas.DataFrame*) -- transmission line data frame + """ + acline = build_aclines(grid) + dcline = build_dclines(grid) + transmission_line = pd.concat([dcline, acline], ignore_index=True) + transmission_line.rename( + columns={ + "branch_id": "TRANSMISSION_LINE", + "from_bus_id": "trans_lz1", + "to_bus_id": "trans_lz2", + "rateA": "existing_trans_cap", + }, + inplace=True, + ) + transmission_line = transmission_line[ + [ + "TRANSMISSION_LINE", + "trans_lz1", + "trans_lz2", + "trans_length_km", + "trans_efficiency", + "existing_trans_cap", + ] + ] + return transmission_line + + +def build_trans_params(): + """Parse transmission parameters constants to a data frame. + + :return: (*pandas.DataFrame*) -- single-row data frame with all params. + """ + return pd.DataFrame([const.transmission_parameters]) diff --git a/switchwrapper/helpers.py b/switchwrapper/helpers.py new file mode 100644 index 0000000..d8b1ae4 --- /dev/null +++ b/switchwrapper/helpers.py @@ -0,0 +1,10 @@ +def make_indices(plant_ids): + """Make the indices for existing and hypothetical generators for input to Switch. + + :param iterable plant_ids: plant IDs. + :return: (*tuple*) -- The first element is a list of indices for existing generators + and the second element is a list of indices for hypothetical generators. 
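+
+    Example (with illustrative plant IDs)::
+
+        make_indices([101, 102])
+        # -> (["g101", "g102"], ["g101i", "g102i"])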
+    """
+    original_plant_indices = [f"g{p}" for p in plant_ids]
+    hypothetical_plant_indices = [f"{o}i" for o in original_plant_indices]
+    return original_plant_indices, hypothetical_plant_indices
diff --git a/switchwrapper/prepare.py b/switchwrapper/prepare.py
new file mode 100644
index 0000000..b841d08
--- /dev/null
+++ b/switchwrapper/prepare.py
@@ -0,0 +1,70 @@
+import os
+import pickle
+
+import switch_model
+
+from switchwrapper import const
+from switchwrapper.grid_to_switch import grid_to_switch
+from switchwrapper.profiles_to_switch import _check_timepoints, profiles_to_switch
+
+
+def prepare_inputs(
+    grid,
+    profiles,
+    timepoints,
+    timestamp_to_timepoints,
+    switch_files_root=None,
+):
+    """Prepare all grid and profile data into a format expected by Switch.
+
+    :param powersimdata.input.grid.Grid grid: grid instance.
+    :param dict profiles: keys are {"demand", "hydro", "solar", "wind"}, values are the
+        corresponding pandas data frames, indexed by hourly timestamp, with columns
+        representing plant IDs (for hydro, solar, and wind) or zone IDs (for demand).
+    :param pandas.DataFrame timepoints: data frame, indexed by timepoint_id, with
+        columns: 'timestamp', 'timeseries', 'ts_period', and 'ts_duration_of_tp'.
+        Each unique value in the 'timeseries' column must map to exactly one entry in
+        each of 'ts_period' and 'ts_duration_of_tp', as if these columns came from
+        another table in a relational database.
+    :param pandas.Series timestamp_to_timepoints: timepoints (values) of each timestamp
+        (index).
+    :param str switch_files_root: the location to save all Switch files. If None, the
+        current working directory is used.
+    """
+    # Validate the input data
+    _check_timepoints(timepoints)
+
+    # Create the 'inputs' folder, if it doesn't already exist
+    switch_files_root = os.getcwd() if switch_files_root is None else switch_files_root
+    inputs_folder = os.path.join(switch_files_root, "inputs")
+    os.makedirs(inputs_folder, exist_ok=True)
+
+    grid_to_switch(grid, inputs_folder)
+    profiles_to_switch(
+        grid, profiles, timepoints, timestamp_to_timepoints, inputs_folder
+    )
+    write_version_file(inputs_folder)
+    write_modules(switch_files_root)
+
+    # Save a copy of the grid object for use in output processing
+    with open(os.path.join(inputs_folder, "grid.pkl"), "wb") as f:
+        pickle.dump(grid, f)
+
+
+def write_modules(folder):
+    """Create a file containing a list of modules to be imported by Switch.
+
+    :param str folder: the location to save the file.
+    """
+    with open(os.path.join(folder, "modules.txt"), "w") as f:
+        for module in const.switch_modules:
+            f.write(f"{module}\n")
+
+
+def write_version_file(folder):
+    """Create a switch_inputs_version.txt file in the inputs folder.
+
+    :param str folder: the location to save the file.
+    """
+    switch_version = switch_model.__version__
+    with open(os.path.join(folder, "switch_inputs_version.txt"), "w") as f:
+        f.write(switch_version)
diff --git a/switchwrapper/profiles_to_switch.py b/switchwrapper/profiles_to_switch.py
new file mode 100644
index 0000000..84dda1a
--- /dev/null
+++ b/switchwrapper/profiles_to_switch.py
@@ -0,0 +1,171 @@
+import os
+
+import pandas as pd
+
+from switchwrapper.helpers import make_indices
+
+
+def profiles_to_switch(
+    grid,
+    profiles,
+    timepoints,
+    timestamp_to_timepoints,
+    output_folder,
+):
+    """Using the provided mapping of hourly timestamps to timepoints, plus hourly
+    profiles, create and save CSVs which produce temporal data needed for Switch.
+ Inputs are indexed by 'timestamps', while outputs are 'timeseries', each of which + can contain multiple 'timepoints'. + + :param powersimdata.input.grid.Grid grid: grid instance. + :param dict profiles: keys are {"demand", "hydro", "solar", "wind"}, values are the + corresponding pandas data frames, indexed by hourly timestamp, with columns + representing plant IDs (for hydro, solar, and wind) or zone IDs (for demand). + :param pandas.DataFrame timepoints: data frame, indexed by timepoint_id, with + columns: 'timestamp', 'timeseries', 'ts_period', and 'ts_duration_of_tp'. + Each unique value in the 'timeseries' column must map to exactly one entry in + each of 'ts_period' and 'ts_duration_of_tp', as if these columns came from + another table in a relational database. + :param pandas.Series timestamp_to_timepoints: timepoints (values) of each timestamp + (index). + :param str output_folder: the location to save outputs, created as necessary. + """ + loads_filepath = os.path.join(output_folder, "loads.csv") + loads = build_loads(grid.bus, profiles["demand"], timestamp_to_timepoints) + loads.to_csv(loads_filepath) + + timepoints_filepath = os.path.join(output_folder, "timepoints.csv") + timepoints[["timestamp", "timeseries"]].to_csv(timepoints_filepath) + + timeseries_filepath = os.path.join(output_folder, "timeseries.csv") + timeseries = build_timeseries(timepoints, timestamp_to_timepoints) + timeseries.to_csv(timeseries_filepath, index=False) + + variable_capacity_factors_filepath = os.path.join( + output_folder, "variable_capacity_factors.csv" + ) + variable_profiles = {p: profiles[p] for p in {"hydro", "solar", "wind"}} + variable_capacity_factors = build_variable_capacity_factors( + variable_profiles, grid.plant, timestamp_to_timepoints + ) + variable_capacity_factors.to_csv(variable_capacity_factors_filepath, index=False) + + +def _check_timepoints(timepoints): + """Validate that a one-to-many relationship exists between the entries of the + 'timeseries' column and the entries of the 'ts_period' and 'ts_duration_of_tp' + columns. + + :param pandas.DataFrame timepoints: data frame, indexed by timepoint_id, with + columns: 'timestamp', 'timeseries', 'ts_period', and 'ts_duration_of_tp'. + :raises ValueError: if each unique value in the 'timeseries' column does not map to + exactly one entry in each of 'ts_period' and 'ts_duration_of_tp', as if these + columns came from another table in a relational database. + """ + timeseries_group_columns = ["timeseries", "ts_period", "ts_duration_of_tp"] + num_timeseries = len(timepoints["timeseries"].unique()) + num_timeseries_groups = len(timepoints.groupby(timeseries_group_columns)) + if num_timeseries != num_timeseries_groups: + raise ValueError( + "Each timeseries entry must have exactly one corresponding entry within the" + " ts_period and ts_duration_of_tp columns." + ) + + +def build_loads(bus, demand, timestamp_to_timepoints): + """Map timestamps to timepoints for demand data frame. + + :param pandas.DataFrame bus: bus data from a Grid object. + :param pandas.DataFrame demand: demand by timestamp (index) and zone IDs (columns). + :param pandas.Series timestamp_to_timepoints: timepoints (values) of each timestamp + (index). + :return: (*pandas.DataFrame*) -- data frame of demand at each bus/timepoint. 
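+
+    Each zone's demand is distributed to its buses in proportion to each bus's share
+    of the zone's total Pd, i.e., bus_demand = zone_demand * (bus Pd / zone Pd), and
+    then averaged over the timestamps mapped to each timepoint.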
+    """
+    # Distribute per-zone demand to buses
+    bus_mod = bus.copy()
+    bus_mod["zone_Pd"] = bus_mod.groupby("zone_id")["Pd"].transform("sum")
+    bus_mod["zone_share"] = bus_mod["Pd"] / bus_mod["zone_Pd"]
+    zone_bus_shares = bus_mod.pivot(columns="zone_id", values="zone_share").fillna(0)
+    bus_demand = demand.dot(zone_bus_shares.T)
+
+    # Calculate mean bus demand for each timepoint
+    bus_demand["TIMEPOINT"] = timestamp_to_timepoints.to_numpy()
+    timepoint_demand = bus_demand.groupby("TIMEPOINT").mean()
+
+    # Convert from table of values to one row for each value
+    timepoint_demand = timepoint_demand.melt(
+        var_name="LOAD_ZONE", value_name="zone_demand_mw", ignore_index=False
+    )
+
+    # Set the index properly for Switch's expectations for the CSV
+    timepoint_demand.reset_index(inplace=True)
+    timepoint_demand.set_index("LOAD_ZONE", inplace=True)
+
+    return timepoint_demand
+
+
+def build_timeseries(timepoints, timestamp_to_timepoints):
+    """Build the timeseries data frame from ``timepoints``, adding information
+    derived from ``timestamp_to_timepoints``.
+
+    :param pandas.DataFrame timepoints: data frame, indexed by timepoint_id, with
+        columns: 'timestamp', 'timeseries', 'ts_period', and 'ts_duration_of_tp'.
+    :param pandas.Series timestamp_to_timepoints: timepoints (values) of each timestamp
+        (index).
+    :return: (*pandas.DataFrame*) -- data frame containing all timeseries information.
+    """
+    timeseries = timepoints.groupby("timeseries").first().drop(columns="timestamp")
+    timeseries["ts_num_tps"] = timepoints.value_counts("timeseries")
+    # Count the number of hours mapped to each timeseries (via the timepoints)
+    hours = timestamp_to_timepoints.value_counts().groupby(timepoints.timeseries).sum()
+    timeseries["ts_scale_to_period"] = hours / (
+        timeseries["ts_duration_of_tp"] * timeseries["ts_num_tps"]
+    )
+    timeseries.index.name = "TIMESERIES"
+    timeseries.reset_index(inplace=True)
+    return timeseries
+
+
+def build_variable_capacity_factors(gen_profiles, plant, timestamp_to_timepoints):
+    """Map timestamps to timepoints for variable generation data frames.
+
+    :param dict gen_profiles: keys include {"hydro", "solar", "wind"}, values are the
+        corresponding pandas data frames, indexed by hourly timestamp, with columns
+        representing plant IDs.
+    :param pandas.DataFrame plant: plant data from a Grid object.
+    :param pandas.Series timestamp_to_timepoints: timepoints (values) of each timestamp
+        (index).
+    :return: (*pandas.DataFrame*) -- data frame of generation at each plant/timepoint.
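+
+    Profile values are normalized by each plant's Pmax to obtain capacity factors,
+    averaged over the timestamps mapped to each timepoint, and then duplicated so
+    that each hypothetical plant ('g{id}i') shares the profile of its existing
+    counterpart ('g{id}').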
+ """ + # Constants + column_names = ["GENERATION_PROJECT", "timepoint", "gen_max_capacity_factor"] + + # Get normalized profiles for all variable plants + all_profiles = pd.concat(gen_profiles.values(), axis=1) + capacities = plant.loc[all_profiles.columns.tolist(), "Pmax"] + normalized_profiles = (all_profiles / capacities).fillna(0) + + # Aggregate timestamps to timepoints + normalized_profiles["timepoint"] = timestamp_to_timepoints.to_numpy() + variable_capacity_factors = normalized_profiles.groupby("timepoint").mean() + + # Convert from table of values to one row for each value + variable_capacity_factors = variable_capacity_factors.melt( + var_name="GENERATION_PROJECT", + value_name="gen_max_capacity_factor", + ignore_index=False, + ) + + # Re-order index & columns + variable_capacity_factors.reset_index(inplace=True) + variable_capacity_factors = variable_capacity_factors[column_names] + + # Copy profiles to apply to current and hypothetical plants + original_plant_indices, hypothetical_plant_indices = make_indices( + variable_capacity_factors["GENERATION_PROJECT"] + ) + all_plant_indices = original_plant_indices + hypothetical_plant_indices + variable_capacity_factors = pd.concat([variable_capacity_factors] * 2) + variable_capacity_factors["GENERATION_PROJECT"] = all_plant_indices + + return variable_capacity_factors diff --git a/switchwrapper/tests/__init__.py b/switchwrapper/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/switchwrapper/tests/test_grid_to_switch.py b/switchwrapper/tests/test_grid_to_switch.py new file mode 100644 index 0000000..201975f --- /dev/null +++ b/switchwrapper/tests/test_grid_to_switch.py @@ -0,0 +1,2 @@ +def test_placeholder(): + pass diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..32d4bbb --- /dev/null +++ b/tox.ini @@ -0,0 +1,28 @@ +[tox] +envlist = pytest, format, flake8 +skipsdist = true + +[testenv] +passenv = + CPPFLAGS + LDFLAGS +deps = + pytest: -rrequirements.txt + {format,checkformatting}: black + {format,checkformatting}: isort + flake8: flake8 + flake8: pep8-naming +changedir = switchwrapper +commands = + pytest: pytest + format: black . + format: isort . + checkformatting: black . --check --diff + checkformatting: isort --check --diff . + flake8: flake8 + +[flake8] +ignore = E501,W503 + +[isort] +profile = black