diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8871ad9f6a7..e9ce087a0c9 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -23,7 +23,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - name: Install dependencies env: ALLOW_BOTORCH_LATEST: true diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml index d85a3e43b32..f303d428820 100644 --- a/.github/workflows/cron.yml +++ b/.github/workflows/cron.yml @@ -37,29 +37,9 @@ jobs: pinned_botorch: false publish-latest-website: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install dependencies - env: - ALLOW_BOTORCH_LATEST: true - ALLOW_LATEST_GPYTORCH_LINOP: true - run: | - # use latest BoTorch - pip install git+https://github.com/cornellius-gp/gpytorch.git - pip install git+https://github.com/pytorch/botorch.git - pip install -e ".[tutorial]" - - name: Publish latest website - env: - DOCUSAURUS_PUBLISH_TOKEN: ${{ secrets.DOCUSAURUS_PUBLISH_TOKEN }} - run: | - bash scripts/publish_site.sh -d + name: Publish latest website + uses: ./.github/workflows/publish_website.yml + secrets: inherit deploy-test-pypi: @@ -72,7 +52,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - name: Install dependencies env: ALLOW_BOTORCH_LATEST: true diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index d813d488d28..884e7386058 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -22,38 +22,53 @@ jobs: pinned_botorch: true secrets: inherit - publish-stable-website: - - needs: tests-and-coverage-pinned # only run if test step succeeds + check-versions: + needs: tests-and-coverage-pinned + name: Check if major or minor version changed runs-on: ubuntu-latest - + outputs: + major_minor_changed: ${{ steps.compare.outputs.major_minor_changed }} steps: - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 with: - python-version: "3.10" - - name: Install dependencies - run: | - # use stable Botorch - pip install -e ".[tutorial]" - - name: Publish latest website - env: - DOCUSAURUS_PUBLISH_TOKEN: ${{ secrets.DOCUSAURUS_PUBLISH_TOKEN }} + fetch-depth: 0 + fetch-tags: true + ref: ${{ github.sha }} + - name: Check if major or minor version changed + id: compare run: | - bash scripts/publish_site.sh -d -v ${{ github.event.release.tag_name }} + git fetch --tags --force + previous_version=$(git describe --tags --abbrev=0 ${{ github.event.release.tag_name }}^) + prev=$(cut -d '.' -f 1-2 <<< $previous_version) # remove patch number + prev=${prev#v} # remove optional "v" prefix + next=$(cut -d '.' -f 1-2 <<< ${{ github.event.release.tag_name }}) + next=${next#v} - deploy: + echo "Updating from version $previous_version to ${{ github.event.release.tag_name }}" + if [[ "$prev" == "$next" ]]; then + echo "::warning::Major/Minor version was not changed. Skipping website & docs generation step." 
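+            # Illustrative (hypothetical tags): releasing v0.4.2 -> v0.4.3 gives prev=next=0.4, so this
+            # branch runs and publishing is skipped; releasing v0.4.3 -> v0.5.0 gives prev=0.4, next=0.5,
+            # so the else branch below sets major_minor_changed=true and the website is republished.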
+ else + echo major_minor_changed=true >> $GITHUB_OUTPUT + fi + version-and-publish-website: + needs: check-versions + name: Version and Publish website + if: ${{ needs.check-versions.outputs.major_minor_changed == 'true' }} + uses: ./.github/workflows/publish_website.yml + with: + new_version: ${{ github.event.release.tag_name }} + secrets: inherit + + deploy: needs: tests-and-coverage-pinned # only run if test step succeeds runs-on: ubuntu-latest - steps: - uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - name: Install dependencies run: | # use stable Botorch diff --git a/.github/workflows/publish_website.yml b/.github/workflows/publish_website.yml new file mode 100644 index 00000000000..e547ac40c38 --- /dev/null +++ b/.github/workflows/publish_website.yml @@ -0,0 +1,74 @@ +name: Publish Website + +on: + workflow_call: + inputs: + new_version: + required: false + type: string + run_tutorials: + required: false + type: boolean + default: false + workflow_dispatch: + + +jobs: + + build-website: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: 'docusaurus-versions' # release branch + fetch-depth: 0 + - name: Sync release branch with main + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git merge origin/main + # To avoid a large number of commits we don't push this sync commit to github until a new release. + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + pip install -e ".[tutorial]" + - if: ${{ inputs.run_tutorials }} + name: Run Tutorials + run: | + python scripts/run_tutorials.py -w $(pwd) + - if: ${{ inputs.new_version }} + name: Create new docusaurus version + run: | + python3 scripts/convert_ipynb_to_mdx.py --clean + cd website + yarn + yarn docusaurus docs:version ${{ inputs.new_version }} + + git add --all + git commit -m "Create version ${{ inputs.new_version }} of site in Docusaurus" + git push --force origin HEAD:docusaurus-versions + - name: Build website + run: | + bash scripts/make_docs.sh -b + - name: Upload website build as artifact + id: deployment + uses: actions/upload-pages-artifact@v3 + with: + path: website/build/ + + deploy-website: + needs: build-website + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/reusable_tutorials.yml b/.github/workflows/reusable_tutorials.yml index 4586309b257..570f4cd40ce 100644 --- a/.github/workflows/reusable_tutorials.yml +++ b/.github/workflows/reusable_tutorials.yml @@ -28,7 +28,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - if: ${{ inputs.pinned_botorch }} name: Install dependencies with pinned BoTorch @@ -46,10 +46,13 @@ jobs: pip install -e ".[tutorial]" - if: ${{ inputs.smoke_test }} - name: Build tutorials with smoke test + name: Run tutorials with smoke test run: | - python scripts/make_tutorials.py -w $(pwd) -e -s + python scripts/run_tutorials.py -w $(pwd) -s - if: ${{ !inputs.smoke_test }} - name: Build tutorials without smoke test + name: Run tutorials without smoke test run: | - python scripts/make_tutorials.py -w $(pwd) -e + 
python scripts/run_tutorials.py -w $(pwd) + - name: Build tutorials + run : | + python scripts/convert_ipynb_to_mdx.py --clean diff --git a/.gitignore b/.gitignore index 35e55125038..fa209866da6 100644 --- a/.gitignore +++ b/.gitignore @@ -131,18 +131,16 @@ sphinx/build/ tutorials/experiment.json tutorials/ax_client_snapshot.json tutorials/*.db +docs/tutorials/* +!docs/tutorials/index.mdx # Docusaurus site -website/yarn.lock +**/yarn.lock website/build/ website/i18n/ -website/node_modules/ - -## Generated for tutorials -website/_tutorials/ -website/static/files/ -website/pages/tutorials/* -!website/pages/tutorials/index.js +**/node_modules/ +.docusaurus +.cache-loader ## Generated for Sphinx website/pages/api/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000000..24b599a3ffd --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,25 @@ +version: "2" + +build: + os: "ubuntu-22.04" + tools: + python: "3.12" + jobs: + post_install: + # Install latest botorch if not on a released version + - | + tag=$(eval "git name-rev --name-only --tags HEAD") + if [ $tag = "undefined" ]; then + pip install git+https://github.com/cornellius-gp/gpytorch.git + pip install git+https://github.com/pytorch/botorch.git + fi + +python: + install: + - method: pip + path: . + extra_requirements: + - dev + +sphinx: + configuration: sphinx/source/conf.py diff --git a/docs/algo-overview.md b/docs/algo-overview.md index 6fab6b9b3f7..2a3881639b0 100644 --- a/docs/algo-overview.md +++ b/docs/algo-overview.md @@ -2,8 +2,8 @@ id: algo-overview title: Overview --- - Ax supports: -* Bandit optimization - * Empirical Bayes with Thompson sampling -* Bayesian optimization + +- Bandit optimization + - Empirical Bayes with Thompson sampling +- Bayesian optimization diff --git a/docs/api.md b/docs/api.md index c3dace88022..d2cf871d514 100644 --- a/docs/api.md +++ b/docs/api.md @@ -3,6 +3,9 @@ id: api title: APIs --- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + The modular design of Ax enables three different usage modes, with different balances of structure to flexibility and reproducibility. Navigate to the ["Tutorials" page](/tutorials) for an in-depth walk-through of each API and @@ -14,53 +17,55 @@ we are in the process of consolidating Ax usage around it more formally. From most lightweight to fullest functionality, our APIs are: -- **Loop API** ([tutorial](/tutorials/gpei_hartmann_loop.html)) is intended for - synchronous optimization loops, where [trials](glossary.md#trial) can be - evaluated right away. With this API, optimization can be executed in a single - call and [experiment](glossary.md#experiment) introspection is available once - optimization is complete. **Use this API only for the simplest use cases where - running a single trial is fast and only one trial should be running at a - time.** -- **[RECOMMENDED] Service API** - ([tutorial](/tutorials/gpei_hartmann_service.html)) can be used as a - lightweight service for parameter-tuning applications where trials might be - evaluated in parallel and data is available asynchronously (e.g. - hyperparameter or simulation optimization). It requires little to no knowledge - of Ax data structures and easily integrates with various schedulers. In this - mode, Ax suggests one-[arm](glossary.md#arm) trials to be evaluated by the - client application, and expects them to be completed with - [metric](glossary.md#metric) data when available. **This is our most popular - API and a good place to start as a new user. 
Use it to leverage nearly full - hyperparameter optimization functionality of Ax without the need to learn its - architecture and how things work under the hood.** - - In both the Loop and the Service API, it is possible to configure the - optimization algorithm via an Ax `GenerationStrategy` - ([tutorial](/tutorials/generation_strategy.html)), so use of Developer API - is not required to control the optimization algorithm in Ax. -- **Developer API** ([tutorial](/tutorials/gpei_hartmann_developer.html)) is for - ad-hoc use by data scientists, machine learning engineers, and researchers. - The developer API allows for a great deal of customization and introspection, - and is recommended for those who plan to use Ax to optimize A/B tests. Using - the developer API requires some knowledge of [Ax architecture](core.md). **Use - this API if you are looking to perform field experiments with `BatchTrial`-s, - customize or contribute to Ax, or leverage advanced functionality that is not - exposed in other APIs.** - - While not an API, the **`Scheduler`** - ([tutorial](/tutorials/scheduler.html)) is an important and distinct - use-case of the Ax Developer API. With the `Scheduler`, it's possible to run - a configurable, managed closed-loop optimization where trials are deployed - and polled in an async fashion and no human intervention/oversight is - required until the experiment is complete. **Use the `Scheduler` when you - are looking to configure and start a full experiment that will need to - interact with an external system to evaluate trials.** +- **Loop API** ([tutorial](/tutorials/gpei_hartmann_loop.html)) is intended for + synchronous optimization loops, where [trials](glossary.md#trial) can be + evaluated right away. With this API, optimization can be executed in a single + call and [experiment](glossary.md#experiment) introspection is available once + optimization is complete. **Use this API only for the simplest use cases where + running a single trial is fast and only one trial should be running at a + time.** +- **[RECOMMENDED] Service API** + ([tutorial](/tutorials/gpei_hartmann_service.html)) can be used as a + lightweight service for parameter-tuning applications where trials might be + evaluated in parallel and data is available asynchronously (e.g. + hyperparameter or simulation optimization). It requires little to no knowledge + of Ax data structures and easily integrates with various schedulers. In this + mode, Ax suggests one-[arm](glossary.md#arm) trials to be evaluated by the + client application, and expects them to be completed with + [metric](glossary.md#metric) data when available. **This is our most popular + API and a good place to start as a new user. Use it to leverage nearly full + hyperparameter optimization functionality of Ax without the need to learn its + architecture and how things work under the hood.** + - In both the Loop and the Service API, it is possible to configure the + optimization algorithm via an Ax `GenerationStrategy` + ([tutorial](/tutorials/generation_strategy.html)), so use of Developer API + is not required to control the optimization algorithm in Ax. +- **Developer API** ([tutorial](/tutorials/gpei_hartmann_developer.html)) is for + ad-hoc use by data scientists, machine learning engineers, and researchers. + The developer API allows for a great deal of customization and introspection, + and is recommended for those who plan to use Ax to optimize A/B tests. Using + the developer API requires some knowledge of [Ax architecture](core.md). 
**Use + this API if you are looking to perform field experiments with `BatchTrial`-s, + customize or contribute to Ax, or leverage advanced functionality that is not + exposed in other APIs.** + - While not an API, the **`Scheduler`** + ([tutorial](/tutorials/scheduler.html)) is an important and distinct + use-case of the Ax Developer API. With the `Scheduler`, it's possible to run + a configurable, managed closed-loop optimization where trials are deployed + and polled in an async fashion and no human intervention/oversight is + required until the experiment is complete. **Use the `Scheduler` when you + are looking to configure and start a full experiment that will need to + interact with an external system to evaluate trials.** Here is a comparison of the three APIs in the simple case of evaluating the unconstrained synthetic Branin function: - - + + + ```py + from ax import optimize from ax.utils.measurement.synthetic_functions import branin @@ -80,11 +85,14 @@ best_parameters, values, experiment, model = optimize( evaluation_function=lambda p: (branin(p["x1"], p["x2"]), 0.0), minimize=True, ) + ``` - + + ```py + from ax.service.ax_client import AxClient, ObjectiveProperties from ax.utils.measurement.synthetic_functions import branin @@ -112,11 +120,14 @@ for _ in range(15): ax_client.complete_trial(trial_index=trial_index, raw_data=branin(parameters["x1"], parameters["x2"])) best_parameters, metrics = ax_client.get_best_parameters() + ``` - + + ```py + from ax import * @@ -164,11 +175,14 @@ for i in range(15): exp.fetch_data() best_parameters = best_arm.parameters + ``` - + + ```py + from ax import * from ax.modelbridge.generation_strategy import GenerationStrategy from ax.service import Scheduler @@ -185,6 +199,8 @@ scheduler = Scheduler( ) scheduler.run_n_trials(100) # Automate running 100 trials and reporting results + ``` - + + diff --git a/docs/banditopt.md b/docs/banditopt.md index da7614dffcb..7b8944dc6e7 100644 --- a/docs/banditopt.md +++ b/docs/banditopt.md @@ -2,14 +2,12 @@ id: banditopt title: Bandit Optimization --- - Many decision problems require choosing from a discrete set of candidates, and for these problems Ax uses bandit optimization. In contrast to [Bayesian optimization](bayesopt.md) — which provides a solution for problems with continuous parameters and an infinite number of potential options — bandit optimization is used for problems with a finite set of choices. Most ordinary A/B tests, in which a handful of options are evaluated against each other, fall into this category. Experimenters typically perform such tests by allocating a fixed percentage of experimental units to each choice, waiting to collect data about each, and then choosing a winner. In the case of an online system receiving incoming requests, this can be done by splitting traffic amongst the choices. However, with more than just a few options A/B tests quickly become prohibitively resource-intensive, largely because all choices — no matter how good or bad they appear — receive the same traffic allocation. Bandit optimization allocates traffic more efficiently among these discrete choices by sequentially updating the allocation of traffic based on each candidate's performance so far. The key problem for bandit optimization algorithms is balancing exploration (sending traffic to candidates that have the potential to perform well) with exploitation (sending traffic to candidates which already appear to perform well). 
This trade-off is very similar to the underlying exploration problem highlighted in Bayesian Optimization [acquisition functions](bayesopt.md#acquisition-functions). Bandit optimization is more sample efficient than traditional static A/B tests: it acquires a greater reward for the same amount of experimentation. Consequently, it is safer with larger cohorts because the samples are automatically diverted towards the good parameter values (and away from the bad ones). - ## How does it work? Ax relies on the simple and effective [Thompson sampling](https://en.wikipedia.org/wiki/Thompson_sampling) algorithm for performing bandit optimization. There is a clear intuition to this method: select a parameterization (referred to from now on as an "arm") with a probability proportional to that arm being the best. This algorithm is easy to implement and has strong guarantees of converging to an arm that is close to the best — all without any human intervention. To understand how this works, we describe an advertising optimization problem in which we want to choose arms which maximize the click-through rate (CTR) and the rewards are binary: either clicks (successes) or views without clicks (failures). @@ -30,16 +28,16 @@ Early in the process, the uncertainty in our estimates of CTR means that the ban We want a bandit algorithm to maximize the total rewards over time or equivalently, to minimize the regret, which is defined as the cumulative difference between the highest possible reward and the actual reward at a point in time. In our running example, regret is the number of clicks we "left on the table" through our choice of allocation procedure. We can imagine two extremes: -1. Pure exploration, in which we just always allocate users evenly across all conditions. This is the standard approach to A/B tests. -2. Pure exploitation, in which we simply allocate all users to the arm we think is most likely to be best. +1. Pure exploration, in which we just always allocate users evenly across all conditions. This is the standard approach to A/B tests. +2. Pure exploitation, in which we simply allocate all users to the arm we think is most likely to be best. Both of these extremes will do a poor job of minimizing our regret, so our aim is to balance them. The following figure compares the cumulative regret of three different approaches to bandit optimization for 200 rounds of experimentation on our running example: -1. Thompson sampling: the primary approach used by Ax, described above -2. Greedy: select the arm with the current best reward -3. Epsilon-greedy: randomly picks an arm $e$ percent of the time, picks the current best arm $100-e$ percent of the time +1. Thompson sampling: the primary approach used by Ax, described above +2. Greedy: select the arm with the current best reward +3. Epsilon-greedy: randomly picks an arm $e$ percent of the time, picks the current best arm $100-e$ percent of the time ![Bandit Optimization: Regret](assets/mab_regret.png) @@ -59,8 +57,8 @@ The diagram below illustrates how the estimates of two different experiments cha The experiment on the left has large effects relative to estimation variability, and so shrinkage (visualized here as distance from the dashed $y=x$ line), is very small. On the right side, however, we can see an experiment where shrinkage makes a significant difference. Effects far from the center of the distribution result in fairly substantial shrinkage, reducing the range of effects by nearly half. 
While effect estimates in the middle were largely unchanged, the largest observed effects went from around 17% before shrinkage to around 8% afterwards. -The vast majority of experimental groups are estimated more accurately using empirical Bayes. The arms which tend to have increases in error are those with the largest effects. Understating the effects of such arms is usually not a very big deal when making launch decisions, however, as one is usually most interested in *which* arm is the best rather than exactly how good it is. +The vast majority of experimental groups are estimated more accurately using empirical Bayes. The arms which tend to have increases in error are those with the largest effects. Understating the effects of such arms is usually not a very big deal when making launch decisions, however, as one is usually most interested in _which_ arm is the best rather than exactly how good it is. -Using Empirical Bayes does better at allocating users to the best arm than does using the raw effect estimates. It does this by concentrating exploration early in the experiment. In particular, it concentrates that exploration on the *set* of arms that look good, rather than over-exploiting the single best performing arm. By spreading exploration out a little bit more when effect estimates are noisy (and playing the best arm a little less), it is able to identify the best arm with more confidence later in the experiment. +Using Empirical Bayes does better at allocating users to the best arm than does using the raw effect estimates. It does this by concentrating exploration early in the experiment. In particular, it concentrates that exploration on the _set_ of arms that look good, rather than over-exploiting the single best performing arm. By spreading exploration out a little bit more when effect estimates are noisy (and playing the best arm a little less), it is able to identify the best arm with more confidence later in the experiment. See more [details in our paper](https://arxiv.org/abs/1904.12918). diff --git a/docs/bayesopt.md b/docs/bayesopt.md index 003774a105c..4b42b3faaff 100644 --- a/docs/bayesopt.md +++ b/docs/bayesopt.md @@ -2,17 +2,15 @@ id: bayesopt title: Bayesian Optimization --- - In complex engineering problems we often come across parameters that have to be tuned using several time-consuming and noisy evaluations. When the number of parameters is not small or some of the parameters are continuous, using large factorial designs (e.g., “grid search”) or global optimization techniques for optimization require more evaluations than is practically feasible. These types of problems show up in a diversity of applications, such as -1. Tuning Internet service parameters and selection of weights for recommender systems, -2. Hyperparameter optimization for machine learning, -3. Finding optimal set of gait parameters for locomotive control in robotics, and -4. Tuning design parameters and rule-of-thumb heuristics for hardware design. +1. Tuning Internet service parameters and selection of weights for recommender systems, +2. Hyperparameter optimization for machine learning, +3. Finding optimal set of gait parameters for locomotive control in robotics, and +4. Tuning design parameters and rule-of-thumb heuristics for hardware design. 
Bayesian optimization (BO) allows us to tune parameters in relatively few iterations by building a smooth model from an initial set of parameterizations (referred to as the "surrogate model") in order to predict the outcomes for as yet unexplored parameterizations. BO is an adaptive approach where the observations from previous evaluations are used to decide what parameterizations to evaluate next. The same strategy can be used to predict the expected gain from all future evaluations and decide on early termination, if the expected benefit is smaller than what is worthwhile for the problem at hand. - ## How does it work? Parameter tuning is often done with simple strategies like grid search. However, grid search scales very poorly with the number of parameters (the dimensionality of the parameter space) and generally does not work well for more than a couple of continuous parameters. Alternative global optimization techniques like DIRECT or genetic algorithms are more flexible, but also typically require more evaluations than is feasible, especially in the presence of uncertainty. @@ -23,24 +21,23 @@ The strategy of relying on successive surrogate models to update knowledge of th ![Gaussian process model fit to noisy data](assets/gp_opt.png) - Figure 1 shows a 1D example, where a surrogate model is fitted to five noisy observations using GPs to predict the objective (solid line) and place uncertainty estimates (proportional to the width of the shaded bands) over the entire x-axis, which represents the range of possible parameter values. The model is able to predict the outcome of configurations that have not yet been tested. As intuitively expected, the uncertainty bands are tight in regions that are well-explored and become wider as we move away from them. - ## Tradeoff between parallelism and total number of trials In Bayesian Optimization (any optimization, really), we have the choice between performing evaluations of our function in a sequential fashion (i.e. only generate a new candidate point to evaluate after the previous candidate has been evaluated), or in a parallel fashion (where we evaluate multiple candidates concurrently). The sequential approach will (in expectation) produce better optimization results, since at any point during the optimization the ML model that drives it uses strictly more information than the parallel approach. However, if function evaluations take a long time and end-to-end optimization time is important, then the parallel approach becomes attractive. The difference between the performance of a sequential (aka 'fully adaptive') algorithm and that of a (partially) parallelized algorithm is referred to as the 'adaptivity gap'. To balance end-to-end optimization time with finding the optimal solution in fewer trials, we opt for a ‘staggered’ approach by allowing a limited number of trials to be evaluated in parallel. By default, in simplified Ax APIs (e.g., in Service API) the allowed parallelism for the Bayesian phase of the optimization is 3. [Service API tutorial](https://ax.dev/tutorials/gpei_hartmann_service.html#How-many-trials-can-run-in-parallel?) has more information on how to handle and change allowed parallelism for that API. -For cases where its not too computationally expensive to run many trials (and therefore sample efficiency is less of a concern), higher parallelism can significantly speed up the end-to-end optimization time. 
By default, we recommend keeping the ratio of allowed parallelism to total trials relatively small (<10%) in order to not hurt optimization performance too much, but the reasonable ratio can differ depending on the specific setup. - +For cases where its not too computationally expensive to run many trials (and therefore sample efficiency is less of a concern), higher parallelism can significantly speed up the end-to-end optimization time. By default, we recommend keeping the ratio of allowed parallelism to total trials relatively small (<10%) in order to not hurt optimization performance too much, but the reasonable ratio can differ depending on the specific setup. ## Acquisition functions BoTorch — Ax's optimization engine — supports some of the most commonly used acquisition functions in BO like expected improvement (EI), probability of improvement, and upper confidence bound. Expected improvement is a popular acquisition function owing to its good practical performance and an analytic form that is easy to compute. As the name suggests it rewards evaluation of the objective $f$ based on the expected improvement relative to the current best. If $f^* = \max_i y_i$ is the current best observed outcome and our goal is to maximize $f$, then EI is defined as -$$ \text{EI}(x) = \mathbb{E}\bigl[\max(f(x) - f^*, 0)\bigr] $$ +$$ +\text{EI}(x) = \mathbb{E}\bigl[\max(f(x) - f^*, 0)\bigr] +$$ The parameterization with the highest EI is selected and evaluated in the next step. Using an acquisition function like EI to sample new points initially promotes quick exploration because its values, like the uncertainty estimates, are higher in unexplored regions. Once the parameter space is adequately explored, EI naturally narrows in on locations where there is a high likelihood of a good objective value. @@ -52,8 +49,8 @@ The above definition of the EI function assumes that the objective function is o How exactly do we model the true objective $f$ for making predictions about yet-to-be-explored regions using only a few noisy observations? GPs are a simple and powerful way of imposing assumptions over functions in the form of a probability distribution. The family of functions is characterized by, -1. A *mean function* that is the average of all functions, and, -2. A covariance or *kernel function* that provides an overall template for the look and feel of the individual functions (such as their shape or smoothness) and how much they can vary around the mean function. +1. A _mean function_ that is the average of all functions, and, +2. A covariance or _kernel function_ that provides an overall template for the look and feel of the individual functions (such as their shape or smoothness) and how much they can vary around the mean function. In most applications of BO, a radial basis function (RBF) or Matern kernel is used because they allow us the flexibility to fit a wide variety of functions in high dimensions. By default, BoTorch uses the Matern 5/2 kernel, which tends to allow for less smooth surfaces compared to the RBF. For more mathematical details and intuitions about GPs and the different kernels check out [this tutorial](https://distill.pub/2019/visual-exploration-gaussian-processes). diff --git a/docs/core.md b/docs/core.md index 4b4ad50018b..e3eb6488c96 100644 --- a/docs/core.md +++ b/docs/core.md @@ -2,7 +2,6 @@ id: core title: Core --- - ### Overview In Ax, an [experiment](glossary.md#experiment) keeps track of the whole optimization process. 
It contains a search space, optimization config, metadata, information on what metrics to track and how to run iterations, etc. An [experiment](glossary.md#experiment) is composed of a sequence of [trials](glossary.md#trial) each of which has a set of parameterizations (or [arms](glossary.md#arm)) to be evaluated. A [trial](glossary.md#trial) is added to the experiment when a new set of arms is proposed by the optimization algorithm. The trial is then evaluated to compute the values of each [metric](glossary.md#metric) for each arm, which are fed into the algorithms to create a new trial. Most applications have one arm per trial, which is the default implementation. @@ -17,11 +16,11 @@ An [experiment](glossary.md#experiment) consists of [trials](glossary.md#trial), ### Search Space and Parameters -A [search space](glossary.md#search-space) is composed of a set of [parameters](glossary.md#parameter) to be tuned in the experiment, and optionally a set of [parameter constraints](glossary.md#parameter-constraint) that define restrictions across these parameters (e.g. `p_a <= p_b`). Each parameter has a name, a type (```int```, ```float```, ```bool```, or ```string```), and a domain, which is a representation of the possible values the parameter can take. The search space is used by the optimization algorithms to know which arms are valid to suggest. +A [search space](glossary.md#search-space) is composed of a set of [parameters](glossary.md#parameter) to be tuned in the experiment, and optionally a set of [parameter constraints](glossary.md#parameter-constraint) that define restrictions across these parameters (e.g. `p_a <= p_b`). Each parameter has a name, a type (`int`, `float`, `bool`, or `string`), and a domain, which is a representation of the possible values the parameter can take. The search space is used by the optimization algorithms to know which arms are valid to suggest. Ax supports three types of parameters: -* **Range parameters**: must be of type `int` or `float`, and the domain is represented by a lower and upper bound. If the parameter is specified as an `int`, newly generated points are rounded to the nearest integer by default. +- **Range parameters**: must be of type `int` or `float`, and the domain is represented by a lower and upper bound. If the parameter is specified as an `int`, newly generated points are rounded to the nearest integer by default. ```python from ax import RangeParameter, ParameterType @@ -29,14 +28,14 @@ float_range_param = RangeParameter(name="x1", parameter_type=ParameterType.FLOAT int_range_param = RangeParameter(name="x2", parameter_type=ParameterType.INT, lower=0, upper=10) ``` -* **Choice parameters**: domain is a set of values +- **Choice parameters**: domain is a set of values ```python from ax import ChoiceParameter, ParameterType choice_param = ChoiceParameter(name="y", parameter_type=ParameterType.STRING, values=["foo", "bar"]) ``` -* **Fixed parameters**: domain is a single value +- **Fixed parameters**: domain is a single value ```python from ax import FixedParameter, ParameterType @@ -45,7 +44,7 @@ fixed_param = FixedParameter(name="z", parameter_type=ParameterType.BOOL, value= Ax supports three types of parameter constraints, each of which can only be used on `int` or `float` parameters: -* **Linear constraints**: `w * v` <= b where w is the vector of parameter weights, v is a vector of parameter values, * is the dot product, and b is the specified bound. 
Linear constraints are specified with the bound and a dictionary that maps parameter name to the weight +- **Linear constraints**: `w * v` <= b where w is the vector of parameter weights, v is a vector of parameter values, * is the dot product, and b is the specified bound. Linear constraints are specified with the bound and a dictionary that maps parameter name to the weight ```python from ax import ParameterConstraint @@ -57,7 +56,7 @@ param_b = RangeParameter(name="b", parameter_type=ParameterType.FLOAT, lower=0.0 con_1 = ParameterConstraint(constraint_dict={"a": 1.0, "b": 0.5}, bound=1.0) ``` -* **Order constraints**: specifies that one parameter must be smaller than the other +- **Order constraints**: specifies that one parameter must be smaller than the other ```python from ax import OrderConstraint @@ -66,7 +65,7 @@ from ax import OrderConstraint con_2 = OrderConstraint(lower_parameter=param_a, upper_parameter=param_b) ``` -* **Sum constraints**: specifies that the sum of the parameters must be greater or less than a bound +- **Sum constraints**: specifies that the sum of the parameters must be greater or less than a bound ```python from ax import SumConstraint @@ -160,12 +159,12 @@ experiment.new_batch_trial().add_generator_run(generator_run=GeneratorRun(...)) A trial goes through multiple phases during the experimentation cycle, tracked by its [`TrialStatus`](../api/core.html#ax.core.base_trial.TrialStatus) field. These stages are: -* `CANDIDATE` - Trial has just been created and can still be modified before deployment. -* `STAGED` - Relevant for external systems, where the trial configuration has been deployed but not begun the evaluation stage. -* `RUNNING` - Trial is in the process of being evaluated. -* `COMPLETED` - Trial completed evaluation successfully. -* `FAILED` - Trial incurred a failure while being evaluated. -* `ABANDONED` - User manually stopped the trial for some specified reason. +- `CANDIDATE` - Trial has just been created and can still be modified before deployment. +- `STAGED` - Relevant for external systems, where the trial configuration has been deployed but not begun the evaluation stage. +- `RUNNING` - Trial is in the process of being evaluated. +- `COMPLETED` - Trial completed evaluation successfully. +- `FAILED` - Trial incurred a failure while being evaluated. +- `ABANDONED` - User manually stopped the trial for some specified reason. When a trial is first created, its status is "candidate". If applicable, we can call `trial.mark_staged` to move the trial into "staged" mode. We then call `trial.run` to run the trial, which moves it into the "running" stage. We can then call diff --git a/docs/data.md b/docs/data.md index c30b97e0817..90582cba961 100644 --- a/docs/data.md +++ b/docs/data.md @@ -2,7 +2,6 @@ id: data title: Data --- - ## Fetching Data [Metrics](glossary.md#metric) provide an interface for fetching data for an experiment or trial. Experiment objectives and outcome constraints are special types of metrics, and you can also attach additional metrics for tracking purposes. @@ -14,7 +13,7 @@ To fetch data for an experiment or trial, use `exp.fetch_data` or `trial.fetch_d Each row of the final DataFrame represents the evaluation of an arm on a metric. As such, the required columns are: `arm_name`, `metric_name`, `mean`, and `sem`. Additional optional columns are also supported: `trial_index`, `start_time`, and `end_time`. 
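As a rough, hypothetical sketch (not taken from this page), a frame in this format might be assembled with pandas and wrapped in Ax's `Data` container roughly as follows; the table below shows the same layout:

```python
import pandas as pd

from ax import Data

# Hypothetical evaluations of two arms on one metric; column names follow the
# required schema described above, plus the optional trial_index column.
df = pd.DataFrame(
    {
        "arm_name": ["0_0", "0_1"],
        "metric_name": ["metric1", "metric1"],
        "mean": [1.20, 0.95],
        "sem": [0.10, 0.12],  # standard error of each mean; 0.0 for noiseless data
        "trial_index": [0, 0],  # optional
    }
)
data = Data(df=df)
```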
| arm_name | metric_name | mean | sem | -|----------|-------------|------|-----| +| -------- | ----------- | ---- | --- | | 0_0 | metric1 | ... | ... | | 0_0 | metric2 | ... | ... | | 0_1 | metric1 | ... | ... | diff --git a/docs/glossary.md b/docs/glossary.md index edf23cfaa82..eef0e5caad1 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -4,49 +4,94 @@ title: Glossary sidebar_label: Glossary --- ### Arm -Mapping from [parameters](glossary.md#parameter) (i.e. a parameterization or parameter configuration) to parameter values. An arm provides the configuration to be tested in an Ax [trial](glossary.md#trial). Also known as "treatment group" or "parameterization", the name 'arm' comes from the [Multi-Armed Bandit](https://en.wikipedia.org/wiki/Multi-armed_bandit) optimization problem, in which a player facing a row of “one-armed bandit” slot machines has to choose which machines to play when and in what order. [```[Arm]```](/api/core.html#module-ax.core.arm) + +Mapping from [parameters](glossary.md#parameter) (i.e. a parameterization or parameter configuration) to parameter values. An arm provides the configuration to be tested in an Ax [trial](glossary.md#trial). Also known as "treatment group" or "parameterization", the name 'arm' comes from the [Multi-Armed Bandit](https://en.wikipedia.org/wiki/Multi-armed_bandit) optimization problem, in which a player facing a row of “one-armed bandit” slot machines has to choose which machines to play when and in what order. [`[Arm]`](/api/core.html#module-ax.core.arm) + ### Bandit optimization + Machine learning-driven version of A/B testing that dynamically allocates traffic to [arms](glossary.md#arm) which are performing well, to determine the best [arm](glossary.md#arm) among a given set. + ### Batch trial -Single step in the [experiment](glossary.md#experiment), contains multiple [arms](glossary.md#arm) that are **deployed and evaluated together**. A batch trial is not just a trial with many arms; it is a trial for which it is important that the arms are evaluated simultaneously, e.g. in an A/B test where the evaluation results are subject to nonstationarity. For cases where multiple arms are evaluated separately and independently of each other, use multiple regular [trials](glossary.md#trial) with a single arm each. [```[BatchTrial]```](/api/core.html#module-ax.core.batch_trial) + +Single step in the [experiment](glossary.md#experiment), contains multiple [arms](glossary.md#arm) that are **deployed and evaluated together**. A batch trial is not just a trial with many arms; it is a trial for which it is important that the arms are evaluated simultaneously, e.g. in an A/B test where the evaluation results are subject to nonstationarity. For cases where multiple arms are evaluated separately and independently of each other, use multiple regular [trials](glossary.md#trial) with a single arm each. [`[BatchTrial]`](/api/core.html#module-ax.core.batch_trial) + ### Bayesian optimization + Sequential optimization strategy for finding an optimal [arm](glossary.md#arm) in a continuous [search space](glossary.md#search-space). + ### Evaluation function + Function that takes a parameterization and an optional weight as input and outputs a set of metric evaluations ([more details](trial-evaluation.md#evaluation-function)). Used in the [Loop API](api.md). + ### Experiment -Object that keeps track of the whole optimization process. 
Contains a [search space](glossary.md#search-space), [optimization config](glossary.md#optimization-config), and other metadata. [```[Experiment]```](/api/core.html#module-ax.core.experiment) + +Object that keeps track of the whole optimization process. Contains a [search space](glossary.md#search-space), [optimization config](glossary.md#optimization-config), and other metadata. [`[Experiment]`](/api/core.html#module-ax.core.experiment) + ### Generation strategy -Abstraction that allows to declaratively specify one or multiple models to use in the course of the optimization and automate transition between them (relevant [tutorial](/tutorials/scheduler.html)). [```[GenerationStrategy]```](/api/modelbridge.html#module-ax.modelbridge.generation_strategy) + +Abstraction that allows to declaratively specify one or multiple models to use in the course of the optimization and automate transition between them (relevant [tutorial](/tutorials/scheduler.html)). [`[GenerationStrategy]`](/api/modelbridge.html#module-ax.modelbridge.generation_strategy) + ### Generator run -Outcome of a single run of the `gen` method of a [model bridge](glossary.md#model-bridge), contains the generated [arms](glossary.md#arm), as well as possibly best [arm](glossary.md#arm) predictions, other [model](glossary.md#model) predictions, fit times etc. [```[GeneratorRun]```](/api/core.html#module-ax.core.generator_run) + +Outcome of a single run of the `gen` method of a [model bridge](glossary.md#model-bridge), contains the generated [arms](glossary.md#arm), as well as possibly best [arm](glossary.md#arm) predictions, other [model](glossary.md#model) predictions, fit times etc. [`[GeneratorRun]`](/api/core.html#module-ax.core.generator_run) + ### Metric -Interface for fetching data for a specific measurement on an [experiment](glossary.md#experiment) or [trial](glossary.md#trial). [```[Metric]```](/api/core.html#module-ax.core.metric) + +Interface for fetching data for a specific measurement on an [experiment](glossary.md#experiment) or [trial](glossary.md#trial). [`[Metric]`](/api/core.html#module-ax.core.metric) + ### Model -Algorithm that can be used to generate new points in a [search space](glossary.md#search-space). [```[Model]```](/api/models.html) + +Algorithm that can be used to generate new points in a [search space](glossary.md#search-space). [`[Model]`](/api/models.html) + ### Model bridge -Adapter for interactions with a [model](glossary.md#model) within the Ax ecosystem. [```[ModelBridge]```](/api/modelbridge.html) + +Adapter for interactions with a [model](glossary.md#model) within the Ax ecosystem. [`[ModelBridge]`](/api/modelbridge.html) + ### Objective -The [metric](glossary.md#metric) to be optimized, with an optimization direction (maximize/minimize). [```[Objective]```](/api/core.html#module-ax.core.objective) + +The [metric](glossary.md#metric) to be optimized, with an optimization direction (maximize/minimize). [`[Objective]`](/api/core.html#module-ax.core.objective) + ### Optimization config -Contains information necessary to run an optimization, i.e. [objective](glossary.md#objective) and [outcome constraints](glossary#outcome-constraints). [```[OptimizationConfig]```](/api/core.html#module-ax.core.optimization_config) + +Contains information necessary to run an optimization, i.e. [objective](glossary.md#objective) and [outcome constraints](glossary#outcome-constraints). 
[`[OptimizationConfig]`](/api/core.html#module-ax.core.optimization_config) + ### Outcome constraint -Constraint on [metric](glossary.md#metric) values, can be an order constraint or a sum constraint; violating [arms](glossary.md#arm) will be considered infeasible. [```[OutcomeConstraint]```](/api/core.html#module-ax.core.outcome_constraint) + +Constraint on [metric](glossary.md#metric) values, can be an order constraint or a sum constraint; violating [arms](glossary.md#arm) will be considered infeasible. [`[OutcomeConstraint]`](/api/core.html#module-ax.core.outcome_constraint) + ### Parameter -Configurable quantity that can be assigned one of multiple possible values, can be continuous ([`RangeParameter`](../api/core.html#ax.core.parameter.RangeParameter)), discrete ([`ChoiceParameter`](../api/core.html#ax.core.parameter.ChoiceParameter)) or fixed ([`FixedParameter`](../api/core.html#ax.core.parameter.FixedParameter)). [```[Parameter]```](/api/core.html#module-ax.core.parameter) + +Configurable quantity that can be assigned one of multiple possible values, can be continuous ([`RangeParameter`](../api/core.html#ax.core.parameter.RangeParameter)), discrete ([`ChoiceParameter`](../api/core.html#ax.core.parameter.ChoiceParameter)) or fixed ([`FixedParameter`](../api/core.html#ax.core.parameter.FixedParameter)). [`[Parameter]`](/api/core.html#module-ax.core.parameter) + ### Parameter constraint -Places restrictions on the relationships between [parameters](glossary.md#parameter). For example `buffer_size1 < buffer_size2` or `buffer_size_1 + buffer_size_2 < 1024`. [```[ParameterConstraint]```](/api/core.html#module-ax.core.parameter_constraint) + +Places restrictions on the relationships between [parameters](glossary.md#parameter). For example `buffer_size1 < buffer_size2` or `buffer_size_1 + buffer_size_2 < 1024`. [`[ParameterConstraint]`](/api/core.html#module-ax.core.parameter_constraint) + ### Relative outcome constraint -[Outcome constraint](glossary.md#outcome-constraint) evaluated relative to the [status quo](glossary.md#status-quo) instead of directly on the metric value. [```[OutcomeConstraint]```](/api/core.html#module-ax.core.outcome_constraint) + +[Outcome constraint](glossary.md#outcome-constraint) evaluated relative to the [status quo](glossary.md#status-quo) instead of directly on the metric value. [`[OutcomeConstraint]`](/api/core.html#module-ax.core.outcome_constraint) + ### Runner -Dispatch abstraction that defines how a given [trial](glossary.md#trial) is to be run (either locally or by dispatching to an external system). [````[Runner]````](/api/core.html#module-ax.core.runner) + +Dispatch abstraction that defines how a given [trial](glossary.md#trial) is to be run (either locally or by dispatching to an external system). [`[Runner]`](/api/core.html#module-ax.core.runner) + ### Scheduler + Configurable closed-loop optimization manager class, capable of conducting a full experiment by deploying trials, polling their results, and leveraging those results to generate and deploy more -trials (relevant [tutorial](/tutorials/scheduler.html)). [````[Scheduler]````](https://ax.dev/versions/latest/api/service.html#module-ax.service.scheduler) +trials (relevant [tutorial](/tutorials/scheduler.html)). 
[`[Scheduler]`](https://ax.dev/versions/latest/api/service.html#module-ax.service.scheduler) + ### Search space -Continuous, discrete or mixed design space that defines the set of [parameters](glossary.md#parameter) to be tuned in the optimization, and optionally [parameter constraints](glossary.md#parameter-constraint) on these parameters. The parameters of the [arms](glossary.md#arm) to be evaluated in the optimization are drawn from a search space. [```[SearchSpace]```](/api/core.html#module-ax.core.search_space) + +Continuous, discrete or mixed design space that defines the set of [parameters](glossary.md#parameter) to be tuned in the optimization, and optionally [parameter constraints](glossary.md#parameter-constraint) on these parameters. The parameters of the [arms](glossary.md#arm) to be evaluated in the optimization are drawn from a search space. [`[SearchSpace]`](/api/core.html#module-ax.core.search_space) + ### SEM + [Standard error](https://en.wikipedia.org/wiki/Standard_error) of the [metric](glossary.md#metric)'s mean, 0.0 for noiseless measurements. If no value is provided, defaults to `np.nan`, in which case Ax infers its value using the measurements collected during experimentation. + ### Status quo -An [arm](glossary.md#arm), usually the currently deployed configuration, which provides a baseline for comparing all other [arms](glossary.md#arm). Also known as a control [arm](glossary.md#arm). [```[StatusQuo]```](/api/core.html#ax.core.experiment.Experiment.status_quo) + +An [arm](glossary.md#arm), usually the currently deployed configuration, which provides a baseline for comparing all other [arms](glossary.md#arm). Also known as a control [arm](glossary.md#arm). [`[StatusQuo]`](/api/core.html#ax.core.experiment.Experiment.status_quo) + ### Trial -Single step in the [experiment](glossary.md#experiment), contains a single [arm](glossary.md#arm). In cases where the trial contains multiple [arms](glossary.md#arm) that are deployed simultaneously, we refer to it as a [batch trial](glossary.md#batch-trial). [```[Trial]```](/api/core.html#module-ax.core.trial), [```[BatchTrial]```](/api/core.html#module-ax.core.batch_trial) + +Single step in the [experiment](glossary.md#experiment), contains a single [arm](glossary.md#arm). In cases where the trial contains multiple [arms](glossary.md#arm) that are deployed simultaneously, we refer to it as a [batch trial](glossary.md#batch-trial). [`[Trial]`](/api/core.html#module-ax.core.trial), [`[BatchTrial]`](/api/core.html#module-ax.core.batch_trial) diff --git a/docs/installation.md b/docs/installation.md index 78925a2f01a..6049a847c11 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -2,35 +2,36 @@ id: installation title: Installation --- - ## Requirements + You need Python 3.10 or later to run Ax. The required Python dependencies are: -* [botorch][def] -* jinja2 -* pandas -* scipy -* sklearn -* plotly >=2.2.1 +- [botorch][def] +- jinja2 +- pandas +- scipy +- sklearn +- plotly >=2.2.1 ## Stable Version ### Installing via pip + We recommend installing Ax via pip (even if using Conda environment): -``` +```shell conda install pytorch torchvision -c pytorch # OSX only (details below) pip install ax-platform ``` Installation will use Python wheels from PyPI, available for [OSX, Linux, and Windows](https://pypi.org/project/ax-platform/#files). -*Note*: Make sure the `pip` being used to install `ax-platform` is actually the one from the newly created Conda environment. 
+_Note_: Make sure the `pip` being used to install `ax-platform` is actually the one from the newly created Conda environment. If you're using a Unix-based OS, you can use `which pip` to check. -*Recommendation for MacOS users*: PyTorch is a required dependency of BoTorch, and can be automatically installed via pip. +_Recommendation for MacOS users_: PyTorch is a required dependency of BoTorch, and can be automatically installed via pip. However, **we recommend you [install PyTorch manually](https://pytorch.org/get-started/locally/#anaconda-1) before installing Ax, using the Anaconda package manager**. Installing from Anaconda will link against MKL (a library that optimizes mathematical computation for Intel processors). This will result in up to an order-of-magnitude speed-up for Bayesian optimization, whereas installing PyTorch from pip does not link against MKL. @@ -40,12 +41,14 @@ If you need CUDA on MacOS, you will need to build PyTorch from source. Please co ### Optional Dependencies To use Ax with a notebook environment, you will need Jupyter. Install it first: -``` + +```shell pip install jupyter ``` If you want to store the experiments in MySQL, you will need SQLAlchemy: -``` + +```shell pip install SQLAlchemy ``` @@ -55,29 +58,30 @@ pip install SQLAlchemy You can install the latest (bleeding edge) version from GitHub: -``` +```shell pip install 'git+https://github.com/facebook/Ax.git#egg=ax-platform' ``` See also the recommendation for installing PyTorch for MacOS users above. At times, the bleeding edge for Ax can depend on bleeding edge versions of BoTorch (or GPyTorch). We therefore recommend installing those from Git as well: -``` + +```shell pip install git+https://github.com/cornellius-gp/gpytorch.git pip install git+https://github.com/pytorch/botorch.git ``` ### Optional Dependencies - To use Ax with a notebook environment, you will need Jupyter. Install it first: -``` +```shell pip install 'git+https://github.com/facebook/Ax.git#egg=ax-platform[notebook]' ``` If storing Ax experiments via SQLAlchemy in MySQL or SQLite: -``` + +```shell pip install 'git+https://github.com/facebook/Ax.git#egg=ax-platform[mysql]' ``` @@ -85,7 +89,7 @@ pip install 'git+https://github.com/facebook/Ax.git#egg=ax-platform[mysql]' When contributing to Ax, we recommend cloning the [repository](https://github.com/facebook/Ax) and installing all optional dependencies: -``` +```shell # bleeding edge versions of GPyTorch + BoTorch are recommended pip install git+https://github.com/cornellius-gp/gpytorch.git pip install git+https://github.com/pytorch/botorch.git @@ -102,5 +106,4 @@ The above example limits the cloned directory size via the argument to `git clone`. If you require the entire commit history you may remove this argument. - [def]: https://www.botorch.org diff --git a/docs/models.md b/docs/models.md index 45b38f1ff17..0f754bf27dc 100644 --- a/docs/models.md +++ b/docs/models.md @@ -2,19 +2,21 @@ id: models title: Models --- - ## Using models in Ax In the optimization algorithms implemented by Ax, models predict the outcomes of metrics within an experiment evaluated at a parameterization, and are used to predict metrics or suggest new parameterizations for trials. Models in Ax are created using factory functions from the [`ax.modelbridge.factory`](../api/modelbridge.html#module-ax.modelbridge.factory). 
All of these models share a common API with [`predict()`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict) to make predictions at new points and [`gen()`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.gen) to generate new candidates to be tested. There are a variety of models available in the factory; here we describe the usage patterns for the primary model types and show how the various Ax utilities can be used with models. #### Sobol sequence + The [`get_sobol`](../api/modelbridge.html#ax.modelbridge.factory.get_sobol) function is used to construct a model that produces a quasirandom Sobol sequence when[`gen`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.gen) is called. This code generates a scrambled Sobol sequence of 10 points: -```Python + +```python from ax.modelbridge.factory import get_sobol m = get_sobol(search_space) gr = m.gen(n=10) ``` + The output of [`gen`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.gen) is a [`GeneratorRun`](../api/core.html#ax.core.generator_run.GeneratorRun) object that contains the generated points, along with metadata about the generation process. The generated arms can be accessed at [`GeneratorRun.arms`](../api/core.html#ax.core.generator_run.GeneratorRun.arms). Additional arguments can be passed to [`get_sobol`](../api/modelbridge.html#ax.modelbridge.factory.get_sobol) such as `scramble=False` to disable scrambling, and `seed` to set a seed (see [model API](../api/models.html#ax.models.random.sobol.SobolGenerator)). @@ -22,6 +24,7 @@ Additional arguments can be passed to [`get_sobol`](../api/modelbridge.html#ax.m Sobol sequences are typically used to select initialization points, and this model does not implement [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict). It can be used on search spaces with any combination of discrete and continuous parameters. #### Gaussian Process with EI + Gaussian Processes (GPs) are used for [Bayesian Optimization](bayesopt.md) in Ax, the [`Models.BOTORCH_MODULAR`](../api/modelbridge.html#ax.modelbridge.registry.Models) registry entry constructs a modular BoTorch model that fits a GP to the data, and uses qLogNEI (or qLogNEHVI for MOO) acquisition function to generate new points on calls to [`gen`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.gen). This code fits a GP and generates a batch of 5 points which maximizes EI: ```Python from ax.modelbridge.registry import Models @@ -31,7 +34,8 @@ gr = m.gen(n=5, optimization_config=optimization_config) ``` In contrast to [`get_sobol`](../api/modelbridge.html#ax.modelbridge.factory.get_sobol), the GP requires data and is able to make predictions. We make predictions by constructing a list of [`ObservationFeatures`](../api/core.html#ax.core.observation.ObservationFeatures) objects with the parameter values for which we want predictions: -```Python + +```python from ax.core.observation import ObservationFeatures obs_feats = [ @@ -40,10 +44,12 @@ obs_feats = [ ] f, cov = m.predict(obs_feats) ``` + The output of [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict) is the mean estimate of each metric and the covariance (across metrics) for each point. 
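As a small illustration (the metric name `"metric_a"` is assumed here, not taken from the snippet above), the returned structures can typically be indexed like plain dictionaries:

```python
# f maps metric name -> list of predicted means, one entry per ObservationFeatures point.
mean_at_first_point = f["metric_a"][0]

# cov is nested by metric pairs; the "diagonal" entry is the predictive variance at that point.
variance_at_first_point = cov["metric_a"]["metric_a"][0]
```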
All Ax models that implement [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict) can be used with the built-in plotting utilities, which can produce plots of model predictions on 1-d or 2-d slices of the parameter space: -```Python + +```python from ax.plot.slice import plot_slice from ax.utils.notebook.plotting import render, init_notebook_plotting @@ -56,9 +62,9 @@ render(plot_slice( )) ``` -
+
-```Python +```python from ax.plot.contour import plot_contour render(plot_contour( @@ -69,23 +75,26 @@ render(plot_contour( )) ``` -
+
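When working in a notebook, the Plotly-based rendering used by `render` typically needs to be initialized once per session before the plots above display inline; a minimal sketch:

```python
# Initialize inline Plotly rendering once; subsequent render(...) calls
# (such as the plot_slice and plot_contour examples above) then display
# directly in the notebook.
from ax.utils.notebook.plotting import init_notebook_plotting

init_notebook_plotting()
```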
Ax also includes utilities for cross validation to assess model predictive performance. Leave-one-out cross validation can be performed as follows: -```Python + +```python from ax.modelbridge.cross_validation import cross_validate, compute_diagnostics cv = cross_validate(model) diagnostics = compute_diagnostics(cv) ``` + [`compute_diagnostics`](../api/modelbridge.html#ax.modelbridge.cross_validation.compute_diagnostics) computes a collection of diagnostics of model predictions, such as the correlation between predictions and actual values, and the p-value for a Fisher test of the model's ability to distinguish high values from low. A very useful tool for assessing model performance is to plot the cross validated predictions against the actual observed values: -```Python + +```python from ax.plot.diagnostic import interact_cross_validation render(interact_cross_validation(cv)) ``` -
+
If the model fits the data well, the values will lie along the diagonal. Poor GP fits tend to produce cross validation plots that are flat with high predictive uncertainty - such fits are unlikely to produce good candidates in [`gen`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.gen). @@ -99,43 +108,53 @@ In discrete spaces where the GP does not predict well, a multi-armed bandit appr The most common way of dealing with categorical variables in Bayesian optimization is to one-hot encode the categories to allow fitting a GP model in a continuous space. In this setting, a categorical variable with categories `["red", "blue", "green"]` is represented by three new variables (one for each category). While this is a convenient choice, it can drastically increase the dimensionality of the search space. In addition, the acquisition function is often optimized in the corresponding continuous space and the final candidate is selected by rounding back to the original space, which may result in selecting sub-optimal points according to the acquisition function. -Our new approach uses separate kernels for the categorical and ordinal (continuous/integer) variables. In particular, we use a kernel of the form: $$k(x, y) = k_\text{cat}(x_\text{cat}, y_\text{cat}) \times k_\text{ord}(x_\text{ord}, y_\text{ord}) + k_\text{cat}(x_\text{cat}, y_\text{cat}) + k_\text{ord}(x_\text{ord}, y_\text{ord})$$ +Our new approach uses separate kernels for the categorical and ordinal (continuous/integer) variables. In particular, we use a kernel of the form: +$$ +k(x, y) = k_\text{cat}(x_\text{cat}, y_\text{cat}) \times k_\text{ord}(x_\text{ord}, y_\text{ord}) + k_\text{cat}(x_\text{cat}, y_\text{cat}) + k_\text{ord}(x_\text{ord}, y_\text{ord}) +$$ For the ordinal variables we can use a standard kernel such as Matérn-5/2, but for the categorical variables we need a way to compute distances between the different categories. A natural choice is to set the distance to 0 if two categories are equal and 1 otherwise, similar to the idea of Hamming distances. This approach can be combined with the idea of automatic relevance determination (ARD), where each categorical variable has its own lengthscale. Rather than optimizing the acquisition function in a continuously relaxed space, we optimize it separately over each combination of the categorical variables. While this is likely to result in better optimization performance, it may lead to slow optimization of the acquisition function when there are many categorical variables. #### Empirical Bayes and Thompson sampling + For [Bandit optimization](banditopt.md), the [`get_empirical_bayes_thompson`](../api/modelbridge.html#ax.modelbridge.factory.get_empirical_bayes_thompson) factory function returns a model that applies [empirical Bayes shrinkage](banditopt.md#empirical-bayes) to a discrete set of arms, and then uses Thompson sampling to construct a policy with the weight that should be allocated to each arm. Here we apply empirical Bayes to the data and use Thompson sampling to generate a policy that is truncated at `n=10` arms: -```Python + +```python from ax.modelbridge.factory import get_empirical_bayes_thompson m = get_empirical_bayes_thompson(experiment, data) gr = m.gen(n=10, optimization_config=optimization_config) ``` + The arms and their corresponding weights can be accessed as `gr.arm_weights` (see the short sketch at the end of this section). As with the GP, we can use [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict) to evaluate the model at points of our choosing.
However, because this is a purely in-sample model, those points should correspond to arms that were in the data. The model prediction will return the estimate at that point after applying the empirical Bayes shrinkage: -```Python + +```python f, cov = m.predict([ObservationFeatures(parameters={'x1': 3.14, 'x2': 2.72})]) ``` + We can generate a plot that shows the predictions for each arm with the shrinkage using [`plot_fitted`](../api/plot.html#ax.plot.scatter.plot_fitted), which shows model predictions on all in-sample arms: -```Python + +```python from ax.plot.scatter import plot_fitted render(plot_fitted(m, metric="metric_a", rel=False)) ``` -
+
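As a small illustration of working with the generated policy, the sketch below normalizes the arm weights into traffic fractions (`gr` is the `GeneratorRun` returned by the Thompson sampling `gen` call above):

```python
# gr.arm_weights maps each Arm in the policy to its allocation weight;
# normalizing makes the per-arm traffic fractions explicit.
total = sum(gr.arm_weights.values())
for arm, weight in gr.arm_weights.items():
    print(arm.parameters, round(weight / total, 3))
```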
#### Factorial designs The factory function [`get_factorial`](../api/modelbridge.html#ax.modelbridge.factory.get_factorial) can be used to construct a factorial design on a set of [`ChoiceParameters`](../api/core.html#ax.core.parameter.ChoiceParameter). -```Python + +```python from ax.modelbridge.factory import get_factorial m = get_factorial(search_space) gr = m.gen(n=10) ``` -Like the Sobol sequence, the factorial model is only used to generate points and does not implement [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict). +Like the Sobol sequence, the factorial model is only used to generate points and does not implement [`predict`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge.predict). ## Deeper dive: organization of the modeling stack @@ -145,17 +164,16 @@ The [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) is Model objects are only used in Ax via a [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge). Each Model object defines an API which does not use Ax objects, allowing for modularity of different model types and making it easy to implement new models. For example, the TorchModel defines an API for a model that operates on torch tensors. There is a 1-to-1 link between [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) objects and Model objects. For instance, the TorchModelBridge takes in Ax objects, converts them to torch tensors, and sends them along to the TorchModel. Similar pairings exist for all of the different model types: -| ModelBridge | Model | Example implementation | -| ------------------- | ------------- | ----------------------------- | -| [`TorchModelBridge`](../api/modelbridge.html#module-ax.modelbridge.torch) | [`TorchModel`](../api/models.html#ax.models.torch_base.TorchModel) | [`BotorchModel`](../api/models.html#ax.models.torch.botorch.BotorchModel) | | -| [`DiscreteModelBridge`](../api/modelbridge.html#module-ax.modelbridge.discrete) | [`DiscreteModel`](../api/models.html#ax.models.discrete_base.DiscreteModel) | [`ThompsonSampler`](../api/models.html#ax.models.discrete.thompson.ThompsonSampler) | -| [`RandomModelBridge`](../api/modelbridge.html#module-ax.modelbridge.random) | [`RandomModel`](../api/models.html#ax.models.random.base.RandomModel) | [`SobolGenerator`](../api/models.html#ax.models.random.sobol.SobolGenerator) | +| ModelBridge | Model | Example implementation | | +| -------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | - | +| [`TorchModelBridge`](../api/modelbridge.html#module-ax.modelbridge.torch) | [`TorchModel`](../api/models.html#ax.models.torch_base.TorchModel) | [`BotorchModel`](../api/models.html#ax.models.torch.botorch.BotorchModel) | | +| [`DiscreteModelBridge`](../api/modelbridge.html#module-ax.modelbridge.discrete) | [`DiscreteModel`](../api/models.html#ax.models.discrete_base.DiscreteModel) | [`ThompsonSampler`](../api/models.html#ax.models.discrete.thompson.ThompsonSampler) | | +| [`RandomModelBridge`](../api/modelbridge.html#module-ax.modelbridge.random) | [`RandomModel`](../api/models.html#ax.models.random.base.RandomModel) | [`SobolGenerator`](../api/models.html#ax.models.random.sobol.SobolGenerator) | | This structure allows for different models like the GP in BotorchModel and the Random Forest in RandomForest to share 
an interface and use common plotting tools at the level of the ModelBridge, while each is implemented using its own torch or numpy structures. The primary role of the [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) is to act as a transformation layer. This includes transformations to the data, search space, and optimization config such as standardization and log transforms, as well as the final transform from Ax objects into the objects consumed by the Model. We now describe how transforms are implemented and used in the ModelBridge. - ## Transforms The transformations in the [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) are done by chaining together a set of individual Transform objects. For continuous space models obtained via factory functions ([`get_sobol`](../api/modelbridge.html#ax.modelbridge.factory.get_sobol) and [`Models.BOTORCH_MODULAR`](../api/modelbridge.html#ax.modelbridge.registry.Models)), the following transforms will be applied by default, in this sequence: @@ -180,13 +198,13 @@ See [the API reference](../api/modelbridge.html#transforms) for the full collect The structure of the modeling stack makes it easy to implement new models and use them inside Ax. There are two ways this might be done. - ### Using an existing Model interface The easiest way to implement a new model is to adapt it to one of the existing Model interfaces: [`TorchModel`](../api/models.html#ax.models.torch_base.TorchModel), [`DiscreteModel`](../api/models.html#ax.models.discrete_base.DiscreteModel), or [`RandomModel`](../api/models.html#ax.models.random.base.RandomModel). The class definition provides the interface for each of the methods that should be implemented in order for Ax to be able to fully use the new model. Note, however, that not all methods need to be implemented to use some Ax functionality. For instance, an implementation of [`TorchModel`](../api/models.html#ax.models.torch_base.TorchModel) that implements only [`fit`](../api/models.html#ax.models.torch_base.TorchModel.fit) and [`predict`](../api/models.html#ax.models.torch_base.TorchModel.predict) can be used to fit data and make plots in Ax; however, it will not be able to generate new candidates (requires implementing [`gen`](../api/models.html#ax.models.torch_base.TorchModel.gen)) or be used with Ax's cross validation utility (requires implementing [`cross_validate`](../api/models.html#ax.models.torch_base.TorchModel.cross_validate)). Once the new model has been implemented, it can be used in Ax with the corresponding [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) from the table above. For instance, suppose a new torch-based model was implemented as a subclass of [`TorchModel`](../api/models.html#ax.models.torch_base.TorchModel). We can use that model in Ax as follows: -```Python + +```python new_model_obj = NewModel(init_args) # An instance of the new model class m = TorchModelBridge( experiment=experiment, @@ -196,6 +214,7 @@ m = TorchModelBridge( transforms=[UnitX, StandardizeY], # Include the desired set of transforms ) ``` + The [`ModelBridge`](../api/modelbridge.html#ax.modelbridge.base.ModelBridge) object `m` can then be used with plotting and cross validation utilities exactly the same way as the built-in models.
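For example, here is a minimal sketch of reusing the cross validation and plotting utilities shown earlier on the wrapped model, assuming `m` was constructed as above and the underlying model implements the required methods (including `cross_validate`):

```python
# The custom model is cross-validated and plotted exactly like the built-in models.
from ax.modelbridge.cross_validation import cross_validate, compute_diagnostics
from ax.plot.diagnostic import interact_cross_validation
from ax.utils.notebook.plotting import render

cv = cross_validate(m)
print(compute_diagnostics(cv))
render(interact_cross_validation(cv))
```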
### Creating a new Model interface diff --git a/docs/storage.md b/docs/storage.md index 6b0b52e1a6f..8d57bed52e6 100644 --- a/docs/storage.md +++ b/docs/storage.md @@ -2,7 +2,6 @@ id: storage title: Storage --- - Ax has extensible support for saving and loading experiments in both JSON and SQL. The former is a good option for users who prefer lightweight, transportable storage, and the latter is better suited to production applications requiring a centralized, high-performance database. ## JSON @@ -86,6 +85,7 @@ init_engine_and_session_factory(url="postgresql+psycopg2://[USERNAME]:[PASSWORD] ``` Then create all tables: + ```py from ax.storage.sqa_store.db import get_engine, create_all_tables @@ -94,6 +94,7 @@ create_all_tables(engine) ``` Then save your experiment: + ```py from ax import Experiment from ax.storage.sqa_store.save import save_experiment @@ -193,7 +194,6 @@ loaded_experiment = load_experiment(experiment_name="my_experiment", config=sqa_ If you choose to add types to your experiments, create an Enum mapping experiment types to integer representations, pass this Enum to a custom instance of `SQAConfig`, and then pass the config to `sqa_store.save`: - ```py from ax import Experiment from ax.storage.sqa_store.save import save_experiment @@ -211,7 +211,6 @@ save_experiment(experiment, config=config) If you choose to add types to your generator runs (beyond the existing `status_quo` type), create an enum mapping generator run types to integer representations, pass this enum to a custom instance of `SQAConfig`, and then pass the config to `sqa_store.save`: - ```py from ax import Experiment from ax.storage.sqa_store.save import save_experiment diff --git a/docs/trial-evaluation.md b/docs/trial-evaluation.md index a4d81e50ae2..2e1777226d2 100644 --- a/docs/trial-evaluation.md +++ b/docs/trial-evaluation.md @@ -2,7 +2,6 @@ id: trial-evaluation title: Trial Evaluation --- - There are 3 paradigms for evaluating [trials](glossary.md#trial) in Ax. Note: ensure that you are using the [appropriate type of trials](core.md#trial-vs-batched-trial) for your @@ -36,9 +35,9 @@ method requires `raw_data` evaluated from the parameters suggested by The data can be in the form of: -- A dictionary of metric names to tuples of (mean and [SEM](glossary.md#sem)) -- A single (mean, SEM) tuple -- A single mean +- A dictionary of metric names to tuples of (mean and [SEM](glossary.md#sem)) +- A single (mean, SEM) tuple +- A single mean In the second case, Ax will assume that the mean and the SEM are for the experiment objective (if the evaluations are noiseless, simply provide an SEM of @@ -104,10 +103,10 @@ The Developer API is supported by the [`Experiment`](/api/core.html#module-ax.core.experiment) class. In this paradigm, the user specifies: -- [`Runner`](../api/core.html#ax.core.runner.Runner): Defines how to deploy the - experiment. -- List of [`Metrics`](../api/core.html#ax.core.metric.Metric): Each defines how - to compute/fetch data for a given objective or outcome. +- [`Runner`](../api/core.html#ax.core.runner.Runner): Defines how to deploy the + experiment. +- List of [`Metrics`](../api/core.html#ax.core.metric.Metric): Each defines how + to compute/fetch data for a given objective or outcome. The experiment requires a `generator_run` to create a new trial or batch trial. A generator run can be generated by a model. 
The trial then has its own `run` diff --git a/docs/tutorials/index.mdx b/docs/tutorials/index.mdx new file mode 100644 index 00000000000..48904a589b2 --- /dev/null +++ b/docs/tutorials/index.mdx @@ -0,0 +1,40 @@ +--- +title: Welcome to Ax Tutorials +sidebar_label: Overview +--- + +Here you can learn about the structure and applications of Ax from examples. + +**Our 3 API tutorials** ([Loop](gpei_hartmann_loop.html), [Service](gpei_hartmann_service.html), and [Developer](gpei_hartmann_developer.html)) are a good place to start. Each tutorial showcases optimization on a constrained Hartmann6 problem, with the Loop API being the simplest to use and the Developer API being the most customizable. + +**NOTE: We recommend the [Service API](gpei_hartmann_service.html) for the vast majority of use cases.** This API provides an ideal balance of flexibility and simplicity for most users, and we are in the process of consolidating Ax usage around it more formally. + +**Further, we explore the different components available in Ax in more detail.** {' '} The components explored below serve to set up an experiment, visualize its results, configure an optimization algorithm, run an entire experiment in a managed closed loop, and combine BoTorch components in Ax in a modular way. + +* [Visualizations](visualizations.html) illustrates the different plots available to view and understand your results. + +* [GenerationStrategy](generation_strategy.html) steps through setting up a way to specify the optimization algorithm (or multiple). A `GenerationStrategy` is an important component of the Service API and the `Scheduler`. + +* [Scheduler](scheduler.html) demonstrates an example of a managed and configurable closed-loop optimization, conducted in an asynchronous fashion. `Scheduler` is a manager abstraction in Ax that deploys trials, polls them, and uses their results to produce more trials. + +* [Modular `BoTorchModel`](modular_botax.html) walks through a new beta feature — an improved interface between Ax and{' '} [BoTorch](https://botorch.org/) — which allows for combining arbitrary BoTorch components like `AcquisitionFunction`, `Model`, `AcquisitionObjective`, etc. into a single{' '} `Model` in Ax. + +**Our other Bayesian Optimization tutorials include:** + +* [Hyperparameter Optimization for PyTorch](tune_cnn_service.html) provides an example of hyperparameter optimization with Ax and integration with an external ML library. + +* [Hyperparameter Optimization on SLURM via SubmitIt](submitit.html) shows how to use the AxClient to schedule jobs and tune hyperparameters on a Slurm cluster. + +* [Multi-Task Modeling](multi_task.html) illustrates multi-task Bayesian Optimization on a constrained synthetic Hartmann6 problem. + +* [Multi-Objective Optimization](multiobjective_optimization.html) demonstrates Multi-Objective Bayesian Optimization on a synthetic Branin-Currin test function. + +* [Trial-Level Early Stopping](early_stopping/early_stopping.html) shows how to use trial-level early stopping on an ML training job to save resources and iterate faster. + +{/* * [Benchmarking Suite](benchmarking_suite_example.html) demonstrates how to use the Ax benchmarking suite to compare Bayesian Optimization algorithm performances and generate a comparative report with visualizations.
*/} + +For experiments done in a real-life setting, refer to our field experiments tutorials: + +* [Bandit Optimization](factorial.html) shows how Thompson Sampling can be used to intelligently reallocate resources to well-performing configurations in real-time. + +* [Human-in-the-Loop Optimization](human_in_the_loop/human_in_the_loop.html) walks through manually influencing the course of optimization in real-time. diff --git a/docs/why-ax.md b/docs/why-ax.md index 29bbcc78872..846ed759c0b 100644 --- a/docs/why-ax.md +++ b/docs/why-ax.md @@ -1,9 +1,10 @@ --- id: why-ax title: Why Ax? -sidebar_label: Why Ax? --- +# Why Ax? + Developers and researchers alike face problems which confront them with a large space of possible ways to configure something –– whether those are "magic numbers" used for infrastructure or compiler flags, learning rates or other hyperparameters in machine learning, or images and calls-to-action used in marketing promotions. Selecting and tuning these configurations can often take time, resources, and can affect the quality of user experiences. Ax is a machine learning system to help automate this process, so that researchers and developers can determine how to get the most out of their software in an optimally efficient way. Ax is a platform for optimizing any kind of experiment, including machine learning experiments, A/B tests, and simulations. Ax can optimize discrete configurations (e.g., variants of an A/B test) using multi-armed bandit optimization, and continuous (e.g., integer or floating point)-valued configurations using Bayesian optimization. This makes it suitable for a wide range of applications. @@ -11,7 +12,8 @@ Ax is a platform for optimizing any kind of experiment, including machine learni Ax has been successfully applied to a variety of product, infrastructure, ML, and research applications at Facebook. # Unique capabilities -- **Support for noisy functions**. Results of A/B tests and simulations with reinforcement learning agents often exhibit high amounts of noise. Ax supports [state-of-the-art algorithms](https://research.facebook.com/blog/2018/09/efficient-tuning-of-online-systems-using-bayesian-optimization/) which work better than traditional Bayesian optimization in high-noise settings. -- **Customization**. Ax's developer API makes it easy to integrate custom data modeling and decision algorithms. This allows developers to build their own custom optimization services with minimal overhead. -- **Multi-modal experimentation**. Ax has first-class support for running and combining data from different types of experiments, such as "offline" simulation data and "online" data from real-world experiments. -- **Multi-objective optimization**. Ax supports multi-objective and constrained optimization which are common to real-world problems, like improving load time without increasing data use. + +- **Support for noisy functions**. Results of A/B tests and simulations with reinforcement learning agents often exhibit high amounts of noise. Ax supports [state-of-the-art algorithms](https://research.facebook.com/blog/2018/09/efficient-tuning-of-online-systems-using-bayesian-optimization/) which work better than traditional Bayesian optimization in high-noise settings. +- **Customization**. Ax's developer API makes it easy to integrate custom data modeling and decision algorithms. This allows developers to build their own custom optimization services with minimal overhead. +- **Multi-modal experimentation**. 
Ax has first-class support for running and combining data from different types of experiments, such as "offline" simulation data and "online" data from real-world experiments. +- **Multi-objective optimization**. Ax supports multi-objective and constrained optimization which are common to real-world problems, like improving load time without increasing data use. diff --git a/scripts/convert_ipynb_to_mdx.py b/scripts/convert_ipynb_to_mdx.py new file mode 100644 index 00000000000..b2d28cf6d54 --- /dev/null +++ b/scripts/convert_ipynb_to_mdx.py @@ -0,0 +1,923 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import io +import json +import os +import re +import shutil +import subprocess +import uuid +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +import mdformat +import nbformat +import pandas as pd +from nbformat.notebooknode import NotebookNode + +SCRIPTS_DIR = Path(__file__).parent.resolve() +LIB_DIR = SCRIPTS_DIR.parent.resolve() + +WEBSITE_DIR = LIB_DIR.joinpath("website") +DOCS_DIR = LIB_DIR.joinpath("docs") +TUTORIALS_DIR = DOCS_DIR.joinpath("tutorials") + +ORGANIZATION = "facebook" +PROJECT = "ax" + +# Data display priority. Below lists the priority for displaying data from cell outputs. +# Cells can output many different items, and some will output a fallback display, e.g. +# text/plain if text/html is not working. The below priorities help ensure the output in +# the MDX file shows the best representation of the cell output. +priorities = [ + "error", + "text/markdown", + "image/png", # matplotlib output. + "application/vnd.jupyter.widget-view+json", # tqdm progress bars. + "application/vnd.plotly.v1+json", # Plotly + "text/html", + "stream", + "text/plain", +] + + +def load_nb_metadata() -> Dict[str, Dict[str, str]]: + """ + Load the metadata and list of notebooks that are to be converted to MDX. + + Args: + None + + Returns: + Dict[str, Dict[str, str]]: A dictionary of metadata needed to convert notebooks + to MDX. Only those notebooks that are listed in the `tutorials.json` file + will be included in the Docusaurus MDX output. + """ + tutorials_json_path = WEBSITE_DIR.joinpath("tutorials.json") + with tutorials_json_path.open("r") as f: + tutorials_data = json.load(f) + # flatten tutorial config, we handle the nested structure in sidebars.js + tutorial_configs = [ + config for category in tutorials_data.values() for config in category + ] + return tutorial_configs + + +def load_notebook(path: Path) -> NotebookNode: + """ + Load the given notebook into memory. + + Args: + path (Path): Path to the Jupyter notebook. + + Returns: + NotebookNode: `nbformat` object, which contains all the notebook cells in it. + """ + with path.open("r") as f: + nb_str = f.read() + nb = nbformat.reads(nb_str, nbformat.NO_CONVERT) + return nb + + +def create_folders(path: Path) -> Tuple[str, Path]: + """ + Create asset folders for the tutorial. + + Args: + path (Path): Path to the Jupyter notebook. + + Returns: + Tuple[str, Path]: Returns a tuple with the filename to use for the MDX file + and the path for the MDX assets folder. 
+ """ + tutorial_folder_name = path.stem + filename = "".join([token.title() for token in tutorial_folder_name.split("_")]) + tutorial_folder = TUTORIALS_DIR.joinpath(tutorial_folder_name) + assets_folder = tutorial_folder / "assets" + img_folder = assets_folder / "img" + plot_data_folder = assets_folder / "plot_data" + if not tutorial_folder.exists(): + tutorial_folder.mkdir(parents=True, exist_ok=True) + if not img_folder.exists(): + img_folder.mkdir(parents=True, exist_ok=True) + if not plot_data_folder.exists(): + plot_data_folder.mkdir(parents=True, exist_ok=True) + return filename, assets_folder + + +def create_frontmatter(path: Path, nb_metadata: Dict[str, Dict[str, str]]) -> str: + """ + Create frontmatter for the resulting MDX file. + + The frontmatter is the data between the `---` lines in an MDX file. + + Args: + path (Path): Path to the Jupyter notebook. + nb_metadata (Dict[str, Dict[str, str]]): The metadata associated with the given + notebook. Metadata is defined in the `tutorials.json` file. + + Returns: + str: MDX formatted frontmatter. + """ + # Add the frontmatter to the MDX string. This is the part between the `---` lines + # that define the tutorial sidebar_label information. + frontmatter_delimiter = ["---"] + frontmatter = [ + f'"{key}": "{value}"' + for key, value in { + "title": metadata["title"], + "sidebar_label": metadata["title"], + }.items() + ] + frontmatter = "\n".join(frontmatter_delimiter + frontmatter + frontmatter_delimiter) + mdx = mdformat.text(frontmatter, options={"wrap": 88}, extensions={"myst"}) + return f"{mdx}\n" + + +def create_imports() -> str: + """ + Create the imports needed for displaying buttons, and interactive plots in MDX. + + Returns: + str: MDX formatted imports. + """ + link_btn = "@site/src/components/LinkButtons.jsx" + cell_out = "@site/src/components/CellOutput.jsx" + plot_out = "@site/src/components/Plotting.jsx" + imports = f'import LinkButtons from "{link_btn}";\n' + imports += f'import CellOutput from "{cell_out}";\n' + imports += f'import {{PlotlyFigure}} from "{plot_out}";\n' + return f"{imports}\n" + + +def get_current_git_tag() -> Optional[str]: + """ + Retrieve the current Git tag if the current commit is tagged. + + Returns: + Optional[str]: The current Git tag as a string if available, otherwise None. + """ + try: + tag = ( + subprocess.check_output( + ["git", "describe", "--tags", "--exact-match"], stderr=subprocess.STDOUT + ) + .strip() + .decode("utf-8") + ) + return tag + except subprocess.CalledProcessError: + return None + + +def create_buttons( + nb_metadata: Dict[str, Dict[str, str]], +) -> str: + """ + Create buttons that link to Colab and GitHub for the tutorial. + + Args: + nb_metadata (Dict[str, Dict[str, str]]): Metadata for the tutorial. + + Returns: + str: MDX formatted buttons. + """ + version = get_current_git_tag() or "main" + github_path = ( + f"{ORGANIZATION}/{PROJECT}/blob/{version}/tutorials/" + f"{nb_metadata['id']}/{nb_metadata['id']}.ipynb" + ) + github_url = f"https://github.com/{github_path}" + colab_url = f"https://colab.research.google.com/github/{github_path}" + return f'\n\n' + + +def handle_images_found_in_markdown( + markdown: str, + new_img_dir: Path, + lib_dir: Path, +) -> str: + """ + Update image paths in the Markdown, and copy the image to the docs location. 
+ + The pattern we search for in the Markdown is + ``![alt-text](path/to/image.png "title")`` with two groups: + + - group 1 = path/to/image.png + - group 2 = "title" + + The first group (the path to the image from the original notebook) will be replaced + with ``assets/img/{name}`` where the name is `image.png` from the example above. The + original image will also be copied to the new location + ``{new_img_dir}/assets/img/{name}``, which can be directly read into the MDX file. + + Args: + markdown (str): Markdown where we look for Markdown flavored images. + new_img_dir (Path): Path where images are copied to for display in the + MDX file. + lib_dir (Path): The location for the Bean Machine repo. + + Returns: + str: The original Markdown with new paths for images. + """ + markdown_image_pattern = re.compile(r"""!\[[^\]]*\]\((.*?)(?=\"|\))(\".*\")?\)""") + searches = list(re.finditer(markdown_image_pattern, markdown)) + + # Return the given Markdown if no images are found. + if not searches: + return markdown + + # Convert the given Markdown to a list so we can delete the old path with the new + # standard path. + markdown_list = list(markdown) + for search in searches: + # Find the old image path and replace it with the new one. + old_path, _ = search.groups() + start = 0 + end = 0 + search = re.search(old_path, markdown) + if search is not None: + start, end = search.span() + old_path = Path(old_path) + name = old_path.name.strip() + new_path = f"assets/img/{name}" + del markdown_list[start:end] + markdown_list.insert(start, new_path) + + # Copy the original image to the new location. + if old_path.exists(): + old_img_path = old_path + else: + # Here we assume the original image exists in the same directory as the + # notebook, which should be in the tutorials folder of the library. + old_img_path = (lib_dir / "tutorials" / old_path).resolve() + new_img_path = str(new_img_dir / name) + shutil.copy(str(old_img_path), new_img_path) + + return "".join(markdown_list) + + +def transform_style_attributes(markdown: str) -> str: + """ + Convert HTML style attributes to something React can consume. + + Args: + markdown (str): Markdown where we look for HTML style attributes. + + Returns: + str: The original Markdown with new React style attributes. + """ + # Finds all instances of `style="attr: value; ..."`. + token = "style=" + pattern = re.compile(f"""{token}["'`]([^"]*)["'`]""") + found_patterns = re.findall(pattern, markdown) + if not found_patterns: + return markdown + + for found_pattern in found_patterns: + # Step 1: splits "attr: value; ..." to + # ["attr: value", ..."]. + step1 = [token.strip() for token in found_pattern.split(";") if token] + + # Step 2: splits ["attr: value", ...] to + # [["attr", "value"], ...]. + step2 = [[token.strip() for token in tokens.split(":")] for tokens in step1] + + # Step 3: converts [["attr", "value"], ...] to + # '{"attr": "value", ...}'. + step3 = json.dumps(dict(step2)) + + # Step 4 wraps the JSON object in {}, so we end up with a string of the form; + # '{{"attr": "value", ...}}'. + step4 = f"{{{step3}}}" + + # Step 5 replaces the old style data with the React style data, and clean the + # string for inclusion in the final Markdown. + markdown = markdown.replace(found_pattern, step4) + markdown = markdown.replace('"{{', "{{").replace('}}"', "}}") + return markdown + + +def sanitize_mdx(mdx: str) -> str: + """ + Sanitize the given MDX string. + + Args: + mdx (str): MDX string to sanitize. + + Returns: + str: Sanitized MDX string. 
+ """ + # Remove some lingering HTML tags that break MDX. + mdx = mdx.replace("", "") + mdx = mdx.replace("", "") + mdx = mdx.replace("
", "
") + # Remove any HTML comments from the Markdown. They are fine to keep in the + # notebooks, but are not really useful in the MDX. + mdx = re.sub("()", "", mdx, flags=re.DOTALL) + # "\" Escape braces to make the text MDX compatible. + mdx = re.sub("([^\\\\])([{}])", "\\g<1>\\\\\\g<2>", mdx) + + # -- KaTeX -- + # Wrap '\begin{}...\end{}' in $$ for KaTeX to work. + mdx = re.sub( + "(\\\\begin\\\\{(\\w*?)\\\\}(.|\n)*?end\\\\{\\2\\\\})", "$$\\g<1>$$", mdx + ) + # Make sure $$ symbols are not escaped and include line breaks. + mdx = re.sub( + "\\\\?\\$\\\\?\\$((?:.|\n)*?)\\\\?\\$\\\\?\\$", "\n$$\n\\g<1>\n$$\n", mdx + ) + # Escaping braces causes issues in math blocks, unescape them. + mdx = re.sub( + "\\$?\\$(.|\n)*?\\$\\$?", + lambda match: match[0].replace("\\{", "{").replace("\\}", "}"), + mdx, + ) + + return mdx + + +def handle_markdown_cell( + cell: NotebookNode, + new_img_dir: Path, + lib_dir: Path, +) -> str: + """ + Handle the given Jupyter Markdown cell and convert it to MDX. + + Args: + cell (NotebookNode): Jupyter Markdown cell object. + new_img_dir (Path): Path where images are copied to for display in the + Markdown cell. + lib_dir (Path): The location for the Bean Machine library. + + Returns: + str: Transformed Markdown object suitable for inclusion in MDX. + """ + markdown = cell["source"] + + # Update image paths in the Markdown and copy them to the Markdown tutorials folder. + # Skip - Our images are base64 encoded, so we don't need to copy them to the docs + # folder. + # markdown = handle_images_found_in_markdown(markdown, new_img_dir, lib_dir) + + markdown = sanitize_mdx(markdown) + mdx = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"}) + + # We will attempt to handle inline style attributes written in HTML by converting + # them to something React can consume. This has to be done after the + # mdformat.text() step since mdformat complains about the converted styles. + mdx = transform_style_attributes(mdx) + + return f"{mdx}\n" + + +def handle_cell_input(cell: NotebookNode, language: str) -> str: + """ + Create a Markdown cell block using the given cell source, and the language. + + The given language will determine cell input syntax styles. Docusaurus uses Prism as + the syntax highlighter, https://prismjs.com. See the Docusaurus documentation for + more information on code blocks + https://docusaurus.io/docs/markdown-features/code-blocks. + + Args: + cell (NotebookNode): A notebook cell. + language (str): Language specifier for syntax highlighting. + + Returns: + str: Code block formatted Markdown string. + """ + cell_source = cell.get("source", "") + return f"```{language}\n{cell_source}\n```\n\n" + + +def handle_image( + values: List[Dict[str, Union[int, str, NotebookNode]]], +) -> List[Tuple[int, str]]: + """ + Convert embedded images to string MDX can consume. + + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. + """ + output = [] + for value in values: + index = value["index"] + mime_type = value["mime_type"] + img = value["data"] + output.append((index, f"![](data:image/{mime_type};base64,{img})\n\n")) + return output + + +def handle_markdown( + values: List[Dict[str, Union[int, str, NotebookNode]]], +) -> List[Tuple[int, str]]: + """ + Convert and format Markdown for MDX. 
+ + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. + """ + output = [] + for value in values: + index = int(value["index"]) + markdown = str(value["data"]) + markdown = mdformat.text(markdown, options={"wrap": 88}, extensions={"myst"}) + output.append((index, f"{markdown}\n\n")) + return output + + +def handle_pandas( + values: List[Dict[str, Union[int, str, NotebookNode]]], +) -> List[Tuple[int, str]]: + """ + Handle how to display pandas DataFrames. + + There is a scoped style tag in the DataFrame output that uses the class name + `dataframe` to style the output. We will use this token to determine if a pandas + DataFrame is being displayed. + + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. + """ + output = [] + for value in values: + index = int(value["index"]) + data = str(value["data"]) + if 'class="dataframe"' not in data: + # pd.read_html() raises an error if there's no dataframe. + continue + df = pd.read_html(io.StringIO(data), flavor="lxml") + # NOTE: The return is a list of dataframes and we only care about the first + # one. + md_df = df[0] + for column in md_df.columns: + if column.startswith("Unnamed"): + md_df.rename(columns={column: ""}, inplace=True) + # Remove the index if it is just a range, and output to markdown. + mdx = "" + if isinstance(md_df.index, pd.RangeIndex): + mdx = md_df.to_markdown(index=False) + elif not isinstance(md_df.index, pd.RangeIndex): + mdx = md_df.to_markdown() + output.append((index, f"\n{sanitize_mdx(mdx)}\n\n")) + return output + + +def handle_plain( + values: List[Dict[str, Union[int, str, NotebookNode]]], +) -> List[Tuple[int, str]]: + """ + Handle how to plain cell output should be displayed in MDX. + + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. + """ + output = [] + for value in values: + index = int(value["index"]) + data = str(value["data"]) + data = [line.strip() for line in data.splitlines() if line] + data = [datum for datum in data if datum] + if data: + data = "\n".join([line for line in str(value["data"]).splitlines() if line]) + # Remove backticks to make the text MDX compatible. + data = data.replace("`", "") + output.append( + (index, f"\n{{\n `{data}`\n}}\n\n\n"), + ) + return output + + +def handle_plotly( + values: List[Dict[str, Union[int, str, NotebookNode]]], + plot_data_folder: Path, +) -> List[Tuple[int, str]]: + """ + Convert Plotly outputs to MDX. + + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + plot_data_folder (Path): Path to the folder where plot data should be + stored. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. 
+ """ + output = [] + for value in values: + index = value["index"] + data = value["data"] + file_name = str(uuid.uuid4()) + file_path = plot_data_folder / f"{file_name}.json" + path_to_data = f"./assets/plot_data/{file_name}.json" + output.append( + (index, f"\n\n"), + ) + with file_path.open("w") as f: + json.dump(data, f, indent=2) + return output + + +def handle_tqdm( + values: List[Dict[str, Union[int, str, NotebookNode]]], +) -> List[Tuple[int, str]]: + """ + Handle the output of tqdm. + + tqdm will be displayed as separate CellOutput React components if we do not + aggregate them all into a single CellOutput object, which is what this method does. + + Args: + values (List[Dict[str, Union[int, str, NotebookNode]]]): Bokeh tagged cell + outputs. + + Returns: + List[Tuple[int, str]]: A list of tuples, where the first entry in the tuple is + the index where the output occurred from the cell, and the second entry of + the tuple is the MDX formatted string. + """ + output = sorted(values, key=lambda item: item["index"]) + index = int(output[0]["index"]) + md = "\n".join([str(item["data"]) for item in output if item["data"]]) + return [(index, f"\n{{\n `{md}`\n}}\n\n\n")] + + +CELL_OUTPUTS_TO_PROCESS = Dict[ + str, + List[Dict[str, Union[int, str, NotebookNode]]], +] + + +def aggregate_mdx( + cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS, + plot_data_folder: Path, +) -> str: + """ + Aggregate the `cell_outputs_to_process` into MDX. + + Args: + cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): A dictionary of cell outputs + that need further processing. + plot_data_folder (Path): Path to where plot data should be stored for the + tutorial. + + Returns: + str: MDX formatted string. + """ + processed_mdx = [] + for key, values in cell_outputs_to_process.items(): + if not values: + continue + if key == "image": + processed_mdx.extend(handle_image(values)) + if key == "markdown": + processed_mdx.extend(handle_markdown(values)) + if key == "pandas": + processed_mdx.extend(handle_pandas(values)) + if key == "plain": + processed_mdx.extend(handle_plain(values)) + if key == "plotly": + processed_mdx.extend(handle_plotly(values, plot_data_folder)) + if key == "tqdm": + processed_mdx.extend(handle_tqdm(values)) + + # Ensure the same ordering of the MDX happens as was found in the original cell + # output. + processed_mdx = sorted(processed_mdx, key=lambda item: item[0]) + mdx = "\n".join([item[1] for item in processed_mdx]) + return mdx + + +def prioritize_dtypes( + cell_outputs: List[NotebookNode], +) -> Tuple[List[List[str]], List[bool]]: + """ + Prioritize cell output data types. + + Args: + cell_outputs (List[NotebookNode]): A list of cell outputs. + + Returns: + Tuple[List[List[str]], List[bool]]: Return two items in the tuple; the first is + a list of prioritized data types and the second is a list boolean values + associated with the cell output having Plotly information in it or not. 
+ """ + cell_output_dtypes = [ + ( + list(cell_output["data"].keys()) + if "data" in cell_output + else [cell_output["output_type"]] + ) + for cell_output in cell_outputs + ] + prioritized_cell_output_dtypes = [ + sorted( + set(dtypes).intersection(set(priorities)), + key=lambda dtype: priorities.index(dtype), + ) + for dtypes in cell_output_dtypes + ] + prioritized_cell_output_dtypes = [ + [str(item) for item in items] for items in prioritized_cell_output_dtypes + ] + plotly_flags = [ + any(["plotly" in output for output in outputs]) + for outputs in cell_output_dtypes + ] + return prioritized_cell_output_dtypes, plotly_flags + + +def aggregate_images_and_plotly( + prioritized_data_dtype: str, + cell_output: NotebookNode, + data: NotebookNode, + plotly_flags: List[bool], + cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS, + i: int, +) -> None: + """ + Aggregates images or Plotly cell outputs into an appropriate bucket. + + Args: + prioritized_data_dtype (str): The prioritized cell output data type. + cell_output (NotebookNode): The actual cell output from the notebook. + data (NotebookNode): The data of the cell output. + plotly_flags (List[bool]): True if a Plotly plot was found in the cell outputs + else False. + cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing + aggregated cell output objects. + i (int): Index for the cell output in the list of cell output objects. + + Returns: + None: Does not return anything, instead adds values to the + cell_outputs_to_process if applicable. + """ + if not plotly_flags[i]: + cell_outputs_to_process["image"].append( + {"index": i, "data": data, "mime_type": prioritized_data_dtype}, + ) + # Plotly outputs a static image, but we can use the JSON in the cell + # output to create interactive plots using a React component. + if plotly_flags[i]: + data = cell_output["data"]["application/vnd.plotly.v1+json"] + cell_outputs_to_process["plotly"].append({"index": i, "data": data}) + + +def aggregate_plain_output( + prioritized_data_dtype: str, + cell_output: NotebookNode, + data: NotebookNode, + cell_outputs_to_process: CELL_OUTPUTS_TO_PROCESS, + i: int, +) -> None: + """ + Aggregate plain text cell outputs together. + + Args: + prioritized_data_dtype (str): The prioritized cell output data type. + cell_output (NotebookNode): The actual cell output from the notebook. + data (NotebookNode): The data of the cell output. + cell_outputs_to_process (CELL_OUTPUTS_TO_PROCESS): Dictionary containing + aggregated cell output objects. + i (int): Index for the cell output in the list of cell output objects. + + Returns: + None: Does not return anything, instead adds values to the + cell_outputs_to_process if applicable. + """ + # Ignore error outputs. + if "name" in cell_output and cell_output["name"] == "stderr": + pass + # Ignore matplotlib legend text output. + if prioritized_data_dtype == "text/plain" and "matplotlib" in data: + pass + cell_outputs_to_process["plain"].append({"index": i, "data": data}) + + +def aggregate_output_types(cell_outputs: List[NotebookNode]) -> CELL_OUTPUTS_TO_PROCESS: + """ + Aggregate cell outputs into a dictionary for further processing. + + Args: + cell_outputs (List[NotebookNode]): List of cell outputs. + + Returns: + CELL_OUTPUTS_TO_PROCESS: Dictionary containing aggregated cell output objects. + """ + # We will use the below cell output data types for prioritizing the output shown in + # the MDX file. 
+ prioritized_cell_output_dtypes, plotly_flags = prioritize_dtypes(cell_outputs) + + cell_outputs_to_process = { + "image": [], + "markdown": [], + "pandas": [], + "plain": [], + "plotly": [], + "tqdm": [], + } + for i, cell_output in enumerate(cell_outputs): + prioritized_data_dtype = prioritized_cell_output_dtypes[i][0] + + # If there is no `data` key in the cell_output, then it may be an error that + # needs to be handled. Even if it is not an error, the data is stored in a + # different key if no `data` key is found. + data = ( + cell_output["data"][prioritized_data_dtype] + if "data" in cell_output + else cell_output["text"] + if "text" in cell_output + else cell_output["evalue"] + ) + image_check = ( + prioritized_data_dtype.startswith("image") + or "plotly" in prioritized_data_dtype + ) + if image_check: + aggregate_images_and_plotly( + prioritized_data_dtype, + cell_output, + data, + plotly_flags, + cell_outputs_to_process, + i, + ) + plain_check = prioritized_data_dtype in ["text/plain", "stream", "error"] + if plain_check: + aggregate_plain_output( + prioritized_data_dtype, + cell_output, + data, + cell_outputs_to_process, + i, + ) + if prioritized_data_dtype == "text/markdown": + cell_outputs_to_process["markdown"].append({"index": i, "data": data}) + if "dataframe" in data: + cell_outputs_to_process["pandas"].append({"index": i, "data": data}) + if prioritized_data_dtype == "application/vnd.jupyter.widget-view+json": + data = cell_output["data"]["text/plain"] + cell_outputs_to_process["tqdm"].append({"index": i, "data": data}) + + return cell_outputs_to_process + + +def handle_cell_outputs(cell: NotebookNode, plot_data_folder: Path) -> str: + """ + Handle cell outputs and convert to MDX. + + Args: + cell (NotebookNode): The cell where the outputs need converting. + plot_data_folder (Path): Path to the folder where plot data should be + stored. + + Returns: + str: MDX formatted cell output. + """ + mdx = "" + + # Return an empty string if there are no actual cell outputs. + cell_outputs = cell.get("outputs", []) + if not cell_outputs: + return mdx + + # We will loop over all cell outputs and bucket them into the appropriate key in the + # dictionary below for further processing. Doing it in this way helps aggregate like + # outputs together e.g. tqdm outputs. + cell_outputs_to_process = aggregate_output_types(cell_outputs) + + # Now we process all aggregated cell outputs into a single output for the type. + md = aggregate_mdx(cell_outputs_to_process, plot_data_folder) + return md + + +def handle_code_cell(cell: NotebookNode, plot_data_folder: Path) -> str: + """ + Handle code cells in Jupyter notebooks and convert them to MDX. + + Args: + cell (NotebookNode): A Jupyter notebook cell that contains code. + plot_data_folder (Path): Path to the folder where plot data should be + stored. + + Returns: + str: MDX formatted code cell. + """ + cell_input_mdx = handle_cell_input(cell, "python") + cell_output_mdx = handle_cell_outputs(cell, plot_data_folder) + return cell_input_mdx + cell_output_mdx + + +def transform_notebook(path: Path, nb_metadata: object) -> str: + """ + Transform a notebook located at the given path into MDX. + + Args: + path (Path): Path to the Jupyter notebook tutorial. + + Returns: + str: MDX formatted string. 
+ """ + filename, assets_folder = create_folders(path) + img_folder = assets_folder / "img" + plot_data_folder = assets_folder / "plot_data" + save_folder = assets_folder.joinpath("..").resolve() + nb = load_notebook(path) + mdx = "" + mdx += create_frontmatter(path, nb_metadata) + mdx += create_imports() + mdx += create_buttons(nb_metadata) + for cell in nb["cells"]: + cell_type = cell["cell_type"] + + # Handle a Markdown cell. + if cell_type == "markdown": + mdx += handle_markdown_cell(cell, img_folder, LIB_DIR) + + # Handle a code cell. + if cell_type == "code": + mdx += handle_code_cell(cell, plot_data_folder) + + # Write the MDX file to disk. + save_path = save_folder / "index.mdx" + with save_path.open("w") as f: + f.write(mdx) + + # Return the string for debugging purposes. + return mdx + + +def clean_up_directories() -> None: + """ + Delete output from previously converted notebooks. Particularly useful for + removing plot data since those filenames are randomly generated and won't + be replaced with future runs. + + Returns: + None: Does not return anything. + """ + if TUTORIALS_DIR.exists(): + # We intentionally leave the static `index.mdx` file in place since that is not + # autogenerated. + for item in os.scandir(TUTORIALS_DIR): + if item.is_dir(): + shutil.rmtree(item.path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Convert tutorial notebooks into mdx files." + ) + parser.add_argument( + "--clean", + action="store_true", + help="Delete output from previously converted notebooks.", + ) + args = parser.parse_args() + + tutorials_metadata = load_nb_metadata() + print("--------------------------------------------") + print("Converting tutorial notebooks into mdx files") + print("--------------------------------------------") + if args.clean: + clean_up_directories() + for metadata in tutorials_metadata: + path = ( + LIB_DIR / "tutorials" / metadata["id"] / (metadata["id"] + ".ipynb") + ).resolve() + print(f"{path.stem}") + mdx = transform_notebook(path, metadata) + print("") diff --git a/scripts/make_docs.sh b/scripts/make_docs.sh index 338309b5077..357afd1e284 100755 --- a/scripts/make_docs.sh +++ b/scripts/make_docs.sh @@ -13,7 +13,7 @@ usage() { echo "Build Ax documentation. Must be executed from root of Ax repository." echo "" echo " -b Build static version of documentation (otherwise start server)." - echo " -o Only Docusaurus (skip Sphinx, tutorials). Useful when just make change to Docusaurus settings." + echo " -o Only Docusaurus (skip tutorials). Useful when just make change to Docusaurus settings." echo " -t Execute tutorials (instead of just converting)." echo " -r Convert backtick-quoted class or function names in .md files into links to API documentation." echo "" @@ -49,14 +49,12 @@ while getopts 'hbotrk:' flag; do done if [[ $ONLY_DOCUSAURUS == false ]]; then - # generate Sphinx documentation echo "-----------------------------------" - echo "Generating API reference via Sphinx" + echo "Generating tutorials" echo "-----------------------------------" - cd sphinx || exit - make html - cd .. 
|| exit -fi + python3 scripts/convert_ipynb_to_mdx.py --clean + +fi # end of not only Docusaurus block # init Docusaurus deps echo "-----------------------------------" @@ -65,47 +63,6 @@ echo "-----------------------------------" cd website || exit yarn -if [[ $ONLY_DOCUSAURUS == false ]]; then - # run script to parse html generated by sphinx - echo "--------------------------------------------" - echo "Parsing Sphinx docs and moving to Docusaurus" - echo "--------------------------------------------" - cd .. - mkdir -p "website/pages/api/" - - cwd=$(pwd) - python3 scripts/parse_sphinx.py -i "${cwd}/sphinx/build/html/" -o "${cwd}/website/pages/api/" - - SPHINX_JS_DIR='sphinx/build/html/_static/' - DOCUSAURUS_JS_DIR='website/static/js/' - - mkdir -p $DOCUSAURUS_JS_DIR - - # move JS files from /sphinx/build/html/_static/*: - cp "${SPHINX_JS_DIR}documentation_options.js" "${DOCUSAURUS_JS_DIR}documentation_options.js" - cp "${SPHINX_JS_DIR}doctools.js" "${DOCUSAURUS_JS_DIR}doctools.js" - cp "${SPHINX_JS_DIR}language_data.js" "${DOCUSAURUS_JS_DIR}language_data.js" - cp "${SPHINX_JS_DIR}searchtools.js" "${DOCUSAURUS_JS_DIR}searchtools.js" - - # searchindex.js is not static util - cp "sphinx/build/html/searchindex.js" "${DOCUSAURUS_JS_DIR}searchindex.js" - - # copy module sources - cp -r "sphinx/build/html/_sources/" "website/static/_sphinx-sources/" - - echo "-----------------------------------" - echo "Generating tutorials" - echo "-----------------------------------" - # mkdir -p "website/_tutorials" - # mkdir -p "website/static/files" - if [[ $BUILD_TUTORIALS == true ]]; then - python3 scripts/make_tutorials.py -w "${cwd}" -e - else - python3 scripts/make_tutorials.py -w "${cwd}" - fi - -cd website || exit -fi # end of not only Docusaurus block if [[ $INSERT_API_REFS == true ]]; then echo "-----------------------------------" @@ -128,3 +85,4 @@ else echo "-----------------------------------" yarn start fi +cd .. || exit diff --git a/scripts/make_tutorials.py b/scripts/make_tutorials.py deleted file mode 100644 index 005c2bbb342..00000000000 --- a/scripts/make_tutorials.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import json -import os -import tarfile -import time -from pathlib import Path - -import nbformat -import papermill -from bs4 import BeautifulSoup -from nbclient.exceptions import CellTimeoutError -from nbconvert import HTMLExporter, ScriptExporter - -TUTORIALS_TO_SKIP = [ - "raytune_pytorch_cnn", # TODO: Times out CI but passes locally. Investigate. 
-] - - -TEMPLATE = """const CWD = process.cwd(); - -const React = require('react'); -const Tutorial = require(`${{CWD}}/core/Tutorial.js`); - -class TutorialPage extends React.Component {{ - render() {{ - const {{config: siteConfig}} = this.props; - const {{baseUrl}} = siteConfig; - return ( - - ); - }} -}} - -module.exports = TutorialPage; - -""" - -# we already load Plotly within html head on the site (just using -""" - - -def _get_paths(repo_dir: str, t_dir: str | None, tid: str) -> dict[str, str]: - if t_dir is not None: - tutorial_dir = os.path.join(repo_dir, "tutorials", t_dir) - html_dir = os.path.join(repo_dir, "website", "_tutorials", t_dir) - js_dir = os.path.join(repo_dir, "website", "pages", "tutorials", t_dir) - py_dir = os.path.join(repo_dir, "website", "static", "files", t_dir) - - for d in [tutorial_dir, html_dir, js_dir, py_dir]: - os.makedirs(d, exist_ok=True) - - tutorial_path = os.path.join(tutorial_dir, f"{tid}.ipynb") - html_path = os.path.join(html_dir, f"{tid}.html") - js_path = os.path.join(js_dir, f"{tid}.js") - ipynb_path = os.path.join(py_dir, f"{tid}.ipynb") - py_path = os.path.join(py_dir, f"{tid}.py") - else: - tutorial_dir = os.path.join(repo_dir, "tutorials") - tutorial_path = os.path.join(repo_dir, "tutorials", f"{tid}.ipynb") - html_path = os.path.join(repo_dir, "website", "_tutorials", f"{tid}.html") - js_path = os.path.join(repo_dir, "website", "pages", "tutorials", f"{tid}.js") - ipynb_path = os.path.join( - repo_dir, "website", "static", "files", f"{tid}.ipynb" - ) - py_path = os.path.join(repo_dir, "website", "static", "files", f"{tid}.py") - - paths = { - "tutorial_dir": tutorial_dir, - "tutorial_path": tutorial_path, - "html_path": html_path, - "js_path": js_path, - "ipynb_path": ipynb_path, - "py_path": py_path, - } - if t_dir is not None: - paths["tar_path"] = os.path.join(py_dir, f"{tid}.tar.gz") - return paths - - -def run_script( - tutorial: Path, timeout_minutes: int, env: dict[str, str] | None = None -) -> None: - if env is not None: - os.environ.update(env) - papermill.execute_notebook( - tutorial, - tutorial, - # This timeout is on cell-execution time, not on total runtime. - execution_timeout=timeout_minutes * 60, - ) - - -def gen_tutorials( - repo_dir: str, - exec_tutorials: bool, - name: str | None = None, - smoke_test: bool = False, -) -> None: - """Generate HTML tutorials for Docusaurus Ax site from Jupyter notebooks. - - Also create ipynb and py versions of tutorial in Docusaurus site for - download. 
- """ - has_errors = False - - with open(os.path.join(repo_dir, "website", "tutorials.json")) as infile: - tutorial_config = json.loads(infile.read()) - # flatten config dict - tutorial_configs = [ - config for category in tutorial_config.values() for config in category - ] - # Running only the tutorial described by "name" - if name is not None: - tutorial_configs = [d for d in tutorial_configs if d["id"] == name] - if len(tutorial_configs) == 0: - raise RuntimeError(f"No tutorial found with name {name}.") - # prepare paths for converted tutorials & files - os.makedirs(os.path.join(repo_dir, "website", "_tutorials"), exist_ok=True) - os.makedirs(os.path.join(repo_dir, "website", "static", "files"), exist_ok=True) - env = {"SMOKE_TEST": "True"} if smoke_test else None - - for config in tutorial_configs: - tid = config["id"] - t_dir = config.get("dir") - exec_on_build = config.get("exec_on_build", True) - print(f"Generating {tid} tutorial") - paths = _get_paths(repo_dir=repo_dir, t_dir=t_dir, tid=tid) - - total_time = None - - if tid in TUTORIALS_TO_SKIP: - print(f"Skipping execution of {tid}") - continue - elif exec_tutorials and exec_on_build: - tutorial_path = Path(paths["tutorial_path"]) - print(f"Executing tutorial {tid}") - start_time = time.monotonic() - - # Try / catch failures for now. We will re-raise at the end. - timeout_minutes = 15 if smoke_test else 150 - try: - # Execute notebook. - run_script( - tutorial=tutorial_path, - timeout_minutes=timeout_minutes, - env=env, - ) - total_time = time.monotonic() - start_time - print( - f"Finished executing tutorial {tid} in {total_time:.2f} seconds. " - ) - except CellTimeoutError: - has_errors = True - print( - f"Tutorial {tid} exceeded the maximum runtime of " - f"{timeout_minutes} minutes." - ) - except Exception as e: - has_errors = True - print(f"Encountered error running tutorial {tid}: \n {e}") - - # load notebook - with open(paths["tutorial_path"]) as infile: - nb_str = infile.read() - nb = nbformat.reads(nb_str, nbformat.NO_CONVERT) - # convert notebook to HTML - exporter = HTMLExporter(template_name="classic") - html, _ = exporter.from_notebook_node(nb) - - # pull out html div for notebook - soup = BeautifulSoup(html, "html.parser") - nb_meat = soup.find("div", {"id": "notebook-container"}) - del nb_meat.attrs["id"] - nb_meat.attrs["class"] = ["notebook"] - - # when output html, iframe it (useful for Ax reports) - for html_div in nb_meat.findAll("div", {"class": "output_html"}): - if html_div.html is not None: - iframe = soup.new_tag("iframe") - iframe.attrs["src"] = "data:text/html;charset=utf-8," + str( - html_div.html - ) - # replace `#` in CSS - iframe.attrs["src"] = iframe.attrs["src"].replace("#", "%23") - html_div.contents = [iframe] - - html_out = MOCK_JS_REQUIRES + str(nb_meat) - - # generate HTML file - with open(paths["html_path"], "w") as html_outfile: - html_outfile.write(html_out) - - # generate JS file - t_dir_js = t_dir if t_dir else "" - script = TEMPLATE.format( - t_dir=t_dir_js, - tid=tid, - total_time=total_time if total_time is not None else "null", - ) - with open(paths["js_path"], "w") as js_outfile: - js_outfile.write(script) - - # output tutorial in both ipynb & py form - nbformat.write(nb, paths["ipynb_path"]) - exporter = ScriptExporter() - script, _ = exporter.from_notebook_node(nb) - with open(paths["py_path"], "w") as py_outfile: - py_outfile.write(script) - - # create .tar archive (if necessary) - if t_dir is not None: - with tarfile.open(paths["tar_path"], "w:gz") as tar: - tar.add( - 
paths["tutorial_dir"], - arcname=os.path.basename(paths["tutorial_dir"]), - ) - - if has_errors: - raise Exception("There are errors in tutorials, will not continue to publish") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Generate JS, HTML, ipynb, and py files for tutorials." - ) - parser.add_argument( - "-w", "--repo_dir", metavar="path", required=True, help="Ax repo directory." - ) - parser.add_argument( - "-s", "--smoke", action="store_true", help="Run in smoke test mode." - ) - parser.add_argument( - "-e", - "--exec_tutorials", - action="store_true", - default=False, - help="Execute tutorials (instead of just converting).", - ) - parser.add_argument( - "-n", - "--name", - help="Run a specific tutorial by name. The name should not include the " - ".ipynb extension.", - ) - args = parser.parse_args() - gen_tutorials( - args.repo_dir, - args.exec_tutorials, - smoke_test=args.smoke, - name=args.name, - ) diff --git a/scripts/parse_sphinx.py b/scripts/parse_sphinx.py deleted file mode 100644 index 4a113f09ce9..00000000000 --- a/scripts/parse_sphinx.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import os - -from bs4 import BeautifulSoup - - -js_scripts = """ - - - - - - -""" - -search_js_scripts = """ - - - -""" - - -def parse_sphinx(input_dir: str, output_dir: str) -> None: - """Parse Sphinx HTML documentation and prepare for embedding in Docusaurus. - - Extracts content of div.document and wraps in div.sphinx. - - Also adds in JS deps if search page and modifies the JS to look for - documentation in updated root. - - Args: - input_dir: Input directory for HTML files generated by Sphinx. - output_dir: Output directory for parsed HTML files to be embedded in - Docusaurus. 
- - """ - for cur, _, files in os.walk(input_dir): - for fname in files: - if fname.endswith(".html"): - with open(os.path.join(cur, fname)) as f: - soup = BeautifulSoup(f.read(), "html.parser") - doc = soup.find("div", {"class": "document"}) - wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"})) - # add js - if fname == "search.html": - out = js_scripts + search_js_scripts + str(wrapped_doc) - else: - out = js_scripts + str(wrapped_doc) - output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir)) - os.makedirs(output_path, exist_ok=True) - with open(os.path.join(output_path, fname), "w") as fout: - fout.write(out) - - # update reference in JS file - with open(os.path.join(input_dir, "_static/searchtools.js")) as js_file: - js = js_file.read() - js = js.replace( - "DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'" - ) - with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file: - js_file.write(js) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.") - parser.add_argument( - "-i", - "--input_dir", - metavar="path", - required=True, - help="Input directory for Sphinx HTML.", - ) - parser.add_argument( - "-o", - "--output_dir", - metavar="path", - required=True, - help="Output directory in Docusaurus.", - ) - args = parser.parse_args() - parse_sphinx(args.input_dir, args.output_dir) diff --git a/scripts/patch_site_config.py b/scripts/patch_site_config.py deleted file mode 100644 index 8bd504a2a46..00000000000 --- a/scripts/patch_site_config.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import re - - -def patch_config( - config_file: str, base_url: str = None, disable_algolia: bool = True -) -> None: - config = open(config_file).read() - - if base_url is not None: - config = re.sub("baseUrl = '/';", f"baseUrl = '{base_url}';", config) - if disable_algolia is True: - config = re.sub( - "const includeAlgolia = true;", "const includeAlgolia = false;", config - ) - - with open(config_file, "w") as outfile: - outfile.write(config) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Path Docusaurus siteConfig.js file when building site." - ) - parser.add_argument( - "-f", - "--config_file", - metavar="path", - required=True, - help="Path to configuration file.", - ) - parser.add_argument( - "-b", - "--base_url", - type=str, - required=False, - help="Value for baseUrl.", - default=None, - ) - parser.add_argument( - "--disable_algolia", - required=False, - action="store_true", - help="Disable algolia.", - ) - args = parser.parse_args() - patch_config(args.config_file, args.base_url, args.disable_algolia) diff --git a/scripts/publish_site.sh b/scripts/publish_site.sh deleted file mode 100644 index e532f21f008..00000000000 --- a/scripts/publish_site.sh +++ /dev/null @@ -1,229 +0,0 @@ -#!/bin/bash -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# Exit if any error occurs -set -e -set -x - -usage() { - echo "Usage: $0 [-d] [-v VERSION]" - echo "" - echo "Build and push updated Ax site. Will either update latest or bump stable version." - echo "" - echo " -d Use Docusaurus bot GitHub credentials. 
If not specified, will use default GitHub credentials." - echo " -v=VERSION Build site for new library version. If not specified, will update latest." - echo "" - exit 1 -} - -VERSION=false -DOCUSAURUS_BOT=false - -while getopts 'dhk:v:' option; do - case "${option}" in - d) - DOCUSAURUS_BOT=true - ;; - h) - usage - ;; - v) - VERSION=${OPTARG} - ;; - *) - usage - ;; - esac -done - -# Function to get absolute filename -fullpath() { - echo "$(cd "$(dirname "$1")" || exit; pwd -P)/$(basename "$1")" -} - -# Current directory (needed for cleanup later) -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# Make temporary directory -WORK_DIR=$(mktemp -d) -cd "${WORK_DIR}" || exit - -if [[ $DOCUSAURUS_BOT == true ]]; then - # Setup git credentials - git config --global user.name "Ax Website Deployment Script" - git config --global user.email "docusaurus-bot@users.noreply.github.com" - echo "machine github.com login docusaurus-bot password ${DOCUSAURUS_PUBLISH_TOKEN}" > ~/.netrc - - # Clone both main & gh-pages branches - git clone https://docusaurus-bot@github.com/facebook/Ax.git Ax-main - git clone --branch gh-pages https://docusaurus-bot@github.com/facebook/Ax.git Ax-gh-pages -else - git clone https://github.com/facebook/Ax.git Ax-main - git clone --branch gh-pages https://github.com/facebook/Ax.git Ax-gh-pages -fi - -# A few notes about the script below: -# * Docusaurus versioning was designed to *only* version the markdown -# files in the docs/ subdirectory. We are repurposing parts of Docusaurus -# versioning, but snapshotting the entire site. Versions of the site are -# stored in the versions/ subdirectory on gh-pages: -# -# --gh-pages/ -# |-- api/ -# |-- css/ -# |-- docs/ -# | ... -# |-- versions/ -# | |-- 1.0.1/ -# | |-- 1.0.2/ -# | | ... -# | |-- latest/ -# | .. -# |-- versions.html -# -# * The stable version is in the top-level directory. It is also -# placed into the versions/ subdirectory so that it does not need to -# be built again when the version is augmented. -# * We want to support serving / building the Docusaurus site locally -# without any versions. This means that we have to keep versions.js -# outside of the website/ subdirectory. -# * We do not want to have a tracked file that contains all versions of -# the site or the latest version. Instead, we determine this at runtime. -# We use what's on gh-pages in the versions subdirectory as the -# source of truth for available versions and use the latest tag on -# the main branch as the source of truth for the latest version. - -if [[ $VERSION == false ]]; then - echo "-----------------------------------------" - echo "Updating latest (main) version of site " - echo "-----------------------------------------" - - # Populate _versions.json from existing versions; this is used - # by versions.js & needed to build the site (note that we don't actually - # use versions.js for latest build, but we do need versions.js - # in website/pages in order to use docusaurus-versions) - CMD="import os, json; " - CMD+="vs = [v for v in os.listdir('Ax-gh-pages/versions') if v != 'latest' and not v.startswith('.')]; " - CMD+="print(json.dumps(vs))" - python3 -c "$CMD" > Ax-main/website/_versions.json - - # Move versions.js to website subdirectory. - # This is the page you see when click on version in navbar. 
- cp Ax-main/scripts/versions.js Ax-main/website/pages/en/versions.js - cd Ax-main/website || exit - - # Replace baseUrl (set to /versions/latest/) & disable Algolia - CONFIG_FILE=$(fullpath "siteConfig.js") - python3 ../scripts/patch_site_config.py -f "${CONFIG_FILE}" -b "/versions/latest/" --disable_algolia - - # Tag site with "latest" version - yarn - yarn run version latest - - # Build site - cd .. || exit - ./scripts/make_docs.sh -b -t - rm -rf ../website/build/Ax/docs/next # don't need this - - # Move built site to gh-pages (but keep old versions.js) - cd "${WORK_DIR}" || exit - cp Ax-gh-pages/versions/latest/versions.html versions.html - rm -rf Ax-gh-pages/versions/latest - mv Ax-main/website/build/Ax Ax-gh-pages/versions/latest - # versions.html goes both in top-level and under en/ (default language) - cp versions.html Ax-gh-pages/versions/latest/versions.html - cp versions.html Ax-gh-pages/versions/latest/en/versions.html - - # erase git history then force push to overwrite - cd Ax-gh-pages || exit - rm -rf .git - git init -b main - git add --all - git commit -m 'Update latest version of site' - git push --force https://github.com/facebook/Ax.git main:gh-pages - -else - echo "-----------------------------------------" - echo "Building new version ($VERSION) of site " - echo "-----------------------------------------" - - # Checkout main branch with specified tag - cd Ax-main || exit - git fetch --tags - git checkout "${VERSION}" - - # Populate _versions.json from existing versions; this contains a list - # of versions present in gh-pages (excluding latest). This is then used - # to populate versions.js (which forms the page that people see when they - # click on version number in navbar). - # Note that this script doesn't allow building a version of the site that - # is already on gh-pages. - CMD="import os, json; " - CMD+="vs = [v for v in os.listdir('../Ax-gh-pages/versions') if v != 'latest' and not v.startswith('.')]; " - CMD+="assert '${VERSION}' not in vs, '${VERSION} is already on gh-pages.'; " - CMD+="vs.append('${VERSION}'); " - CMD+="print(json.dumps(vs))" - python3 -c "$CMD" > website/_versions.json - - cp scripts/versions.js website/pages/en/versions.js - - # Set Docusaurus version as 'stable' - cd website || exit - yarn - yarn run version stable - - # Build new version of site (this will be stable, default version) - # Execute tutorials - cd .. || exit - ./scripts/make_docs.sh -b -t - - # Move built site to new folder (new-site) & carry over old versions - # from existing gh-pages - cd "${WORK_DIR}" || exit - rm -rf Ax-main/website/build/Ax/docs/next # don't need this - mv Ax-main/website/build/Ax new-site - mv Ax-gh-pages/versions new-site/versions - - # Build new version of site (to be placed in versions/$VERSION/) - # the only thing that changes here is the baseUrl (for nav purposes) - # we build this now so that in the future, we can just bump version and not move - # previous stable to versions - cd Ax-main/website || exit - - # Replace baseUrl & disable Algolia - CONFIG_FILE=$(fullpath "siteConfig.js") - python3 ../scripts/patch_site_config.py -f "${CONFIG_FILE}" -b "/versions/${VERSION}/" --disable_algolia - - # Set Docusaurus version with exact version & build - yarn run version "${VERSION}" - cd .. 
|| exit - # Only run Docusaurus (skip tutorial build & Sphinx) - ./scripts/make_docs.sh -b -o - rm -rf website/build/Ax/docs/next # don't need this - rm -rf website/build/Ax/docs/stable # or this - mv website/build/Ax "../new-site/versions/${VERSION}" - - # Need to run script to update versions.js for previous versions in - # new-site/versions with the newly built versions.js. Otherwise, - # the versions.js for older versions in versions subdirectory - # won't be up-to-date and will not have a way to navigate back to - # newer versions. This is the only part of the old versions that - # needs to be updated when a new version is built. - cd "${WORK_DIR}" || exit - python3 Ax-main/scripts/update_versions_html.py -p "${WORK_DIR}" - - # Init as Git repo and push to gh-pages - cd new-site || exit - git init -b main - git add --all - git commit -m "Publish version ${VERSION} of site" - git push --force https://github.com/facebook/Ax.git main:gh-pages - -fi - -# Clean up -cd "${SCRIPT_DIR}" || exit -rm -rf "${WORK_DIR}" diff --git a/scripts/run_tutorials.py b/scripts/run_tutorials.py new file mode 100644 index 00000000000..3c8f1cfeb8c --- /dev/null +++ b/scripts/run_tutorials.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import json +import os +import time +from pathlib import Path + +import papermill +from nbclient.exceptions import CellTimeoutError + +TUTORIALS_TO_SKIP = [ + "raytune_pytorch_cnn", # TODO: Times out CI but passes locally. Investigate. +] + + +def run_script( + tutorial: Path, timeout_minutes: int, env: dict[str, str] | None = None +) -> None: + if env is not None: + os.environ.update(env) + papermill.execute_notebook( + tutorial, + tutorial, + # This timeout is on cell-execution time, not on total runtime. + execution_timeout=timeout_minutes * 60, + ) + + +def run_tutorials( + repo_dir: str, + name: str | None = None, + smoke_test: bool = False, +) -> None: + """Run Jupyter notebooks. + + We check in the tutorial notebook un-run, and run them in CI as integration tests. + """ + has_errors = False + + with open(os.path.join(repo_dir, "website", "tutorials.json")) as infile: + tutorial_config = json.loads(infile.read()) + # flatten config dict + tutorial_configs = [ + config for category in tutorial_config.values() for config in category + ] + # Running only the tutorial described by "name" + if name is not None: + tutorial_configs = [d for d in tutorial_configs if d["id"] == name] + if len(tutorial_configs) == 0: + raise RuntimeError(f"No tutorial found with name {name}.") + # prepare paths for converted tutorials & files + env = {"SMOKE_TEST": "True"} if smoke_test else None + + for config in tutorial_configs: + tid = config["id"] + tutorial_path = os.path.join(repo_dir, "tutorials", tid, f"{tid}.ipynb") + + total_time = None + + if tid in TUTORIALS_TO_SKIP: + print(f"Skipping execution of {tid}") + continue + else: + print(f"Executing tutorial {tid}") + start_time = time.monotonic() + + # Try / catch failures for now. We will re-raise at the end. + timeout_minutes = 15 if smoke_test else 150 + try: + # Execute notebook. + run_script( + tutorial=tutorial_path, + timeout_minutes=timeout_minutes, + env=env, + ) + total_time = time.monotonic() - start_time + print( + f"Finished executing tutorial {tid} in {total_time:.2f} seconds. 
" + ) + except CellTimeoutError: + has_errors = True + print( + f"Tutorial {tid} exceeded the maximum runtime of " + f"{timeout_minutes} minutes." + ) + except Exception as e: + has_errors = True + print(f"Encountered error running tutorial {tid}: \n {e}") + + if has_errors: + raise Exception("There are errors in tutorials, will not continue to publish") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Generate JS, HTML, ipynb, and py files for tutorials." + ) + parser.add_argument( + "-w", "--repo_dir", metavar="path", required=True, help="Ax repo directory." + ) + parser.add_argument( + "-s", "--smoke", action="store_true", help="Run in smoke test mode." + ) + parser.add_argument( + "-n", + "--name", + help="Run a specific tutorial by name. The name should not include the " + ".ipynb extension.", + ) + args = parser.parse_args() + run_tutorials( + args.repo_dir, + smoke_test=args.smoke, + name=args.name, + ) diff --git a/scripts/update_versions_html.py b/scripts/update_versions_html.py deleted file mode 100644 index 0457511d6e9..00000000000 --- a/scripts/update_versions_html.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Meta Platforms, Inc. and affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import json - -from bs4 import BeautifulSoup - - -BASE_URL = "/" - - -def updateVersionHTML(base_path, base_url=BASE_URL): - with open(base_path + "/Ax-main/website/_versions.json", "rb") as infile: - versions = json.loads(infile.read()) - - with open(base_path + "/new-site/versions.html", "rb") as infile: - html = infile.read() - - versions.append("latest") - - def prepend_url(a_tag, base_url, version): - href = a_tag.attrs["href"] - if href.startswith("https://") or href.startswith("http://"): - return href - else: - return "{base_url}versions/{version}{original_url}".format( - base_url=base_url, version=version, original_url=href - ) - - for v in versions: - soup = BeautifulSoup(html, "html.parser") - - # title - title_link = soup.find("header").find("a") - title_link.attrs["href"] = prepend_url(title_link, base_url, v) - - # nav - nav_links = soup.find("nav").findAll("a") - for link in nav_links: - link.attrs["href"] = prepend_url(link, base_url, v) - - # version link - t = soup.find("h2", {"class": "headerTitleWithLogo"}).find_next("a") - t.attrs["href"] = prepend_url(t, base_url, v) - h3 = t.find("h3") - h3.string = v - - # output files - with open(base_path + f"/new-site/versions/{v}/versions.html", "w") as outfile: - outfile.write(str(soup)) - with open( - base_path + f"/new-site/versions/{v}/en/versions.html", "w" - ) as outfile: - outfile.write(str(soup)) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description=( - "Fix links in version.html files for Docusaurus site." - "This is used to ensure that the versions.js for older " - "versions in versions subdirectory are up-to-date and " - "will have a way to navigate back to newer versions." - ) - ) - parser.add_argument( - "-p", - "--base_path", - metavar="path", - required=True, - help="Input directory for rolling out new version of site.", - ) - args = parser.parse_args() - updateVersionHTML(args.base_path) diff --git a/scripts/versions.js b/scripts/versions.js deleted file mode 100644 index 7ca92da8780..00000000000 --- a/scripts/versions.js +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. 
- * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - * - * @format - */ - -const React = require('react'); - -const CompLibrary = require('../../core/CompLibrary'); - -const Container = CompLibrary.Container; - -const CWD = process.cwd(); - -const versions = require(`${CWD}/_versions.json`); -// Sort the versions, handling the version numbers and extra characters -versions.sort(function(a, b) { - a = a.replace("v", ""); - b = b.replace("v", ""); - var aArr = a.split("."); - var bArr = b.split("."); - if (aArr.len !== bArr.len) { - throw 'Version formats do not match'; - } - - var aInt, bInt; - for (var i = 0 ; i < aArr.length; i++) { - aInt = parseInt(aArr[i]); - bInt = parseInt(bArr[i]); - if (aInt === bInt) { - continue; - } - return aInt - bInt; - } - return 0; -}).reverse(); - - -function Versions(props) { - const {config: siteConfig} = props; - const baseUrl = siteConfig.baseUrl; - const latestVersion = versions[0]; - return ( -
-    <div className="docMainWrapper wrapper">
-      <Container className="mainContainer versionsContainer">
-        <div className="post">
-          <header className="postHeader">
-            <h1>{siteConfig.title} Versions</h1>
-          </header>
-          <table className="versions">
-            <tbody>
-              <tr>
-                <th>Version</th>
-                <th>Install with</th>
-                <th>Documentation</th>
-              </tr>
-              <tr>
-                <td>{`stable (${latestVersion})`}</td>
-                <td>
-                  <code>pip3 install ax-platform</code>
-                </td>
-                <td>
-                  <a href={`${baseUrl}index.html`}>stable</a>
-                </td>
-              </tr>
-              <tr>
-                <td>
-                  {'latest'}
-                  {' (main)'}
-                </td>
-                <td>
-                  <code>pip3 install git+ssh://git@github.com/facebook/Ax.git</code>
-                </td>
-                <td>
-                  <a href={`${baseUrl}versions/latest/index.html`}>latest</a>
-                </td>
-              </tr>
-            </tbody>
-          </table>
-          <h3>Past Versions</h3>
-          <table className="versions">
-            <tbody>
-              {versions.map(
-                version =>
-                  version !== latestVersion && (
-                    <tr key={version}>
-                      <th>{version}</th>
-                      <td>
-                        <a href={`${baseUrl}versions/${version}/index.html`}>
-                          Documentation
-                        </a>
-                      </td>
-                    </tr>
-                  ),
-              )}
-            </tbody>
-          </table>
-        </div>
-      </Container>
-    </div>
- ); -} - -module.exports = Versions; diff --git a/setup.py b/setup.py index 1be63417b4a..99931affeb7 100644 --- a/setup.py +++ b/setup.py @@ -43,12 +43,15 @@ "pytest-cov", "sphinx", "sphinx-autodoc-typehints", + "sphinx_rtd_theme", "torchvision>=0.5.0", "nbconvert", "jupyter-client==6.1.12", # Replace with `tensorboard >= x.x` once tb cuts a release. # https://github.com/tensorflow/tensorboard/issues/6869#issuecomment-2273718763 "numpy<2.0", + "lxml", + "mdformat-myst", ] MYSQL_REQUIRES = ["SQLAlchemy==1.4.17"] @@ -75,6 +78,7 @@ "pytorch-lightning", # For the early stopping tutorial. "papermill", # For executing the tutorials. "submitit", # Required for building the SubmitIt notebook. + "mdformat", ] diff --git a/sphinx/Makefile b/sphinx/Makefile index 52636c09d35..5c459aa146c 100644 --- a/sphinx/Makefile +++ b/sphinx/Makefile @@ -5,7 +5,7 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = -BUILDDIR = build +BUILDDIR = ../website/build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) @@ -49,17 +49,17 @@ help: @echo " coverage to run coverage check of the documentation (if enabled)" clean: - rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/api/* html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/api @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + @echo "Build finished. The HTML pages are in $(BUILDDIR)/api." dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/api @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + @echo "Build finished. The HTML pages are in $(BUILDDIR)/api." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml diff --git a/sphinx/source/_static/custom.css b/sphinx/source/_static/custom.css new file mode 100644 index 00000000000..827ccb0baf8 --- /dev/null +++ b/sphinx/source/_static/custom.css @@ -0,0 +1,3 @@ +.wy-nav-content { + max-width: initial; +} diff --git a/sphinx/source/conf.py b/sphinx/source/conf.py index 234ba679ea7..414a273a956 100644 --- a/sphinx/source/conf.py +++ b/sphinx/source/conf.py @@ -42,6 +42,7 @@ "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.viewcode", + "sphinx_rtd_theme", ] # Add any paths that contain templates here, relative to this directory. @@ -121,7 +122,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "alabaster" +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -150,7 +151,9 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = [] +html_static_path = ["_static"] + +html_css_files = ['custom.css'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. 
These files are copied diff --git a/tutorials/early_stopping/early_stopping.ipynb b/tutorials/early_stopping/early_stopping.ipynb index 1444a2a0372..2c1541f8cda 100644 --- a/tutorials/early_stopping/early_stopping.ipynb +++ b/tutorials/early_stopping/early_stopping.ipynb @@ -15,6 +15,20 @@ "NOTE: Although the original NAS tutorial is for a multi-objective problem, this tutorial focuses on a single objective (validation accuracy) problem. Early stopping currently does not support \\\"true\\\" multi-objective stopping, although one can use [logical compositions of early stopping strategies](https://github.com/facebook/Ax/blob/main/ax/early_stopping/strategies/logical.py) to target multiple objectives separately. Early stopping for the multi-objective case is currently a work in progress." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "779ea790", + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tutorials/external_generation_node.ipynb b/tutorials/external_generation_node/external_generation_node.ipynb similarity index 98% rename from tutorials/external_generation_node.ipynb rename to tutorials/external_generation_node/external_generation_node.ipynb index 5817bfa5bf8..2e9b6690937 100644 --- a/tutorials/external_generation_node.ipynb +++ b/tutorials/external_generation_node/external_generation_node.ipynb @@ -20,6 +20,19 @@ "NOTE: This is for illustration purposes only. We do not recommend using this strategy as it typically does not perform well compared to Ax's default algorithms due to it's overly greedy behavior." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tutorials/factorial.ipynb b/tutorials/factorial/factorial.ipynb similarity index 98% rename from tutorials/factorial.ipynb rename to tutorials/factorial/factorial.ipynb index 63c559635d6..29f54eff6bc 100644 --- a/tutorials/factorial.ipynb +++ b/tutorials/factorial/factorial.ipynb @@ -24,6 +24,18 @@ "In this example, we first run an exploratory batch to collect data on all possible combinations. Then we use empirical Bayes to model the data and shrink noisy estimates toward the mean. Next, we use Thompson Sampling to suggest a set of arms (combinations of factors and levels) on which to collect more data. We repeat the process until we have identified the best performing combination(s)." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -75,7 +87,10 @@ }, "outputs": [], "source": [ - "init_notebook_plotting()" + "import plotly.io as pio\n", + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/generation_strategy.ipynb b/tutorials/generation_strategy/generation_strategy.ipynb similarity index 98% rename from tutorials/generation_strategy.ipynb rename to tutorials/generation_strategy/generation_strategy.ipynb index 5d2ebe2810c..4f585cc9a54 100644 --- a/tutorials/generation_strategy.ipynb +++ b/tutorials/generation_strategy/generation_strategy.ipynb @@ -1,5 +1,18 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tutorials/gpei_hartmann_developer.ipynb b/tutorials/gpei_hartmann_developer/gpei_hartmann_developer.ipynb similarity index 98% rename from tutorials/gpei_hartmann_developer.ipynb rename to tutorials/gpei_hartmann_developer/gpei_hartmann_developer.ipynb index 4b216818bac..a0adc099c5f 100644 --- a/tutorials/gpei_hartmann_developer.ipynb +++ b/tutorials/gpei_hartmann_developer/gpei_hartmann_developer.ipynb @@ -14,6 +14,18 @@ "The Developer API is suitable when the user wants maximal customization of the optimization loop. This tutorial demonstrates optimization of a Hartmann6 function using the `Experiment` construct. In this example, trials will be evaluated synchronously." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -44,8 +56,11 @@ ")\n", "from ax.modelbridge.registry import Models\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/gpei_hartmann_loop.ipynb b/tutorials/gpei_hartmann_loop/gpei_hartmann_loop.ipynb similarity index 95% rename from tutorials/gpei_hartmann_loop.ipynb rename to tutorials/gpei_hartmann_loop/gpei_hartmann_loop.ipynb index 119970e1788..320df994853 100644 --- a/tutorials/gpei_hartmann_loop.ipynb +++ b/tutorials/gpei_hartmann_loop/gpei_hartmann_loop.ipynb @@ -11,6 +11,18 @@ "For more customizability of the optimization procedure, consider the Service or Developer API." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -25,8 +37,11 @@ "from ax.service.managed_loop import optimize\n", "from ax.utils.measurement.synthetic_functions import hartmann6\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/gpei_hartmann_service.ipynb b/tutorials/gpei_hartmann_service/gpei_hartmann_service.ipynb similarity index 98% rename from tutorials/gpei_hartmann_service.ipynb rename to tutorials/gpei_hartmann_service/gpei_hartmann_service.ipynb index 177f7a182d4..c56aa2e5542 100644 --- a/tutorials/gpei_hartmann_service.ipynb +++ b/tutorials/gpei_hartmann_service/gpei_hartmann_service.ipynb @@ -15,6 +15,18 @@ "- Repeat" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -24,8 +36,11 @@ "from ax.service.ax_client import AxClient, ObjectiveProperties\n", "from ax.utils.measurement.synthetic_functions import hartmann6\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/gss.ipynb b/tutorials/gss/gss.ipynb similarity index 97% rename from tutorials/gss.ipynb rename to tutorials/gss/gss.ipynb index 6e6ff3d8aed..3fcc7260d31 100644 --- a/tutorials/gss.ipynb +++ b/tutorials/gss/gss.ipynb @@ -13,6 +13,18 @@ "Global Stopping stops an optimization loop when some data-based criteria are met which suggest that future trials will not be very helpful. For example, we might stop when there has been very little improvement in the last five trials. This is as opposed to trial-level early stopping, which monitors the results of expensive evaluations and terminates those that are unlikely to produce promising results, freeing resources to explore more promising configurations. 
For more on trial-level early stopping, see the tutorial: https://ax.dev/tutorials/early_stopping/early_stopping.html" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -30,8 +42,11 @@ "from ax.service.ax_client import AxClient, ObjectiveProperties\n", "from ax.utils.measurement.synthetic_functions import Branin, branin\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/human_in_the_loop/human_in_the_loop.ipynb b/tutorials/human_in_the_loop/human_in_the_loop.ipynb index 93af094fb7a..1f0d274c1ec 100644 --- a/tutorials/human_in_the_loop/human_in_the_loop.ipynb +++ b/tutorials/human_in_the_loop/human_in_the_loop.ipynb @@ -39,6 +39,18 @@ "For this tutorial, we will assume our experiment has already been created." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -63,8 +75,11 @@ "from ax.utils.notebook.plotting import render, init_notebook_plotting\n", "\n", "import pandas as pd\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/modular_botax.ipynb b/tutorials/modular_botax/modular_botax.ipynb similarity index 82% rename from tutorials/modular_botax.ipynb rename to tutorials/modular_botax/modular_botax.ipynb index dc8f72147ec..444c47fb1f7 100644 --- a/tutorials/modular_botax.ipynb +++ b/tutorials/modular_botax/modular_botax.ipynb @@ -1,44 +1,54 @@ { - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5, "cells": [ { "cell_type": "code", + "execution_count": null, + "id": "dc0b0d48", + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, + { + "cell_type": "code", + "execution_count": 1, "metadata": { + "collapsed": false, + "customOutput": null, + "executionStartTime": 1730916291451, + "executionStopTime": 1730916298337, + "id": "about-preview", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "cca773d8-5e94-4b5a-ae54-22295be8936a" }, - "id": "about-preview", "originalKey": "f4e8ae18-2aa3-4943-a15a-29851889445c", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916291451, - "executionStopTime": 1730916298337, - "serverExecutionDuration": 4531.2523420434, - "collapsed": false, "requestMsgId": "f4e8ae18-2aa3-4943-a15a-29851889445c", - "customOutput": null + 
"serverExecutionDuration": 4531.2523420434 }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I1106 100452.333 _utils_internal.py:321] NCCL_DEBUG env var is set to None\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I1106 100452.334 _utils_internal.py:339] NCCL_DEBUG is forced to WARN from None\n" + ] + } + ], "source": [ "from typing import Any, Dict, Optional, Tuple, Type\n", "\n", @@ -71,37 +81,20 @@ "# BoTorch components\n", "from botorch.models.model import Model\n", "from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood" - ], - "execution_count": 1, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "I1106 100452.333 _utils_internal.py:321] NCCL_DEBUG env var is set to None\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "I1106 100452.334 _utils_internal.py:339] NCCL_DEBUG is forced to WARN from None\n" - ] - } ] }, { "cell_type": "markdown", "metadata": { + "id": "northern-affairs", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "58ea5ebf-ff3a-40b4-8be3-1b85c99d1c4a" }, - "id": "northern-affairs", "originalKey": "c9a665ca-497e-4d7c-bbb5-1b9f8d1d311c", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "# Setup and Usage of BoTorch Models in Ax\n", @@ -127,15 +120,15 @@ { "cell_type": "markdown", "metadata": { + "id": "pending-support", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "c06d1b5c-067d-4618-977e-c8269a98bd0a" }, - "id": "pending-support", "originalKey": "4706d02e-6b3f-4161-9e08-f5a31328b1d1", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 1. Quick-start example\n", @@ -145,54 +138,64 @@ }, { "cell_type": "code", + "execution_count": 2, "metadata": { + "collapsed": false, + "customOutput": null, + "executionStartTime": 1730916294801, + "executionStopTime": 1730916298389, + "id": "parental-sending", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "72934cf2-4ecf-483a-93bd-4df88b19a7b8" }, - "id": "parental-sending", "originalKey": "20f25ded-5aae-47ee-955e-a2d5a2a1fe09", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916294801, - "executionStopTime": 1730916298389, - "serverExecutionDuration": 22.605526028201, - "collapsed": false, "requestMsgId": "20f25ded-5aae-47ee-955e-a2d5a2a1fe09", - "customOutput": null + "serverExecutionDuration": 22.605526028201 }, - "source": [ - "experiment = get_branin_experiment(with_trial=True)\n", - "data = get_branin_data(trials=[experiment.trials[0]])" - ], - "execution_count": 2, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:04:56] ax.core.experiment: The is_test flag has been set to True. This flag is meant purely for development and integration testing purposes. 
If you are running a live experiment, please set this flag to False\n" ] } + ], + "source": [ + "experiment = get_branin_experiment(with_trial=True)\n", + "data = get_branin_data(trials=[experiment.trials[0]])" ] }, { "cell_type": "code", + "execution_count": 3, "metadata": { + "collapsed": false, + "executionStartTime": 1730916295849, + "executionStopTime": 1730916299900, + "id": "rough-somerset", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "e571212c-7872-4ebc-b646-8dad8d4266fd" }, - "id": "rough-somerset", "originalKey": "c0806cce-a1d3-41b8-96fc-678aa3c9dd92", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916295849, - "executionStopTime": 1730916299900, - "serverExecutionDuration": 852.73489891551, - "collapsed": false, - "requestMsgId": "c0806cce-a1d3-41b8-96fc-678aa3c9dd92" + "requestMsgId": "c0806cce-a1d3-41b8-96fc-678aa3c9dd92", + "serverExecutionDuration": 852.73489891551 }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO 11-06 10:04:57] ax.modelbridge.transforms.standardize_y: Outcome branin is constant, within tolerance.\n" + ] + } + ], "source": [ "# `Models` automatically selects a model + model bridge combination.\n", "# For `BOTORCH_MODULAR`, it will select `BoTorchModel` and `TorchModelBridge`.\n", @@ -204,30 +207,20 @@ " ), # Optional, will use default if unspecified\n", " botorch_acqf_class=qLogNoisyExpectedImprovement, # Optional, will use default if unspecified\n", ")" - ], - "execution_count": 3, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "[INFO 11-06 10:04:57] ax.modelbridge.transforms.standardize_y: Outcome branin is constant, within tolerance.\n" - ] - } ] }, { "cell_type": "markdown", "metadata": { + "id": "hairy-wiring", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "fba91372-7aa6-456d-a22b-78ab30c26cd8" }, - "id": "hairy-wiring", "originalKey": "46f5c2c7-400d-4d8d-b0b9-a241657b173f", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Now we can use this model to generate candidates (`gen`), predict outcome at a point (`predict`), or evaluate acquisition function value at a given point (`evaluate_acquisition_function`)." 
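
Only `gen` is exercised in the next cells; for reference, here is a minimal sketch of the other two calls named in this markdown cell. It assumes the Ax `ModelBridge` interface (`predict` and `evaluate_acquisition_function` both taking `ObservationFeatures`) and uses an arbitrary probe point, so treat it as illustrative rather than part of this diff:

    # Illustrative only -- not part of this change.
    from ax.core.observation import ObservationFeatures

    obs_feats = [ObservationFeatures(parameters={"x1": 0.0, "x2": 0.0})]

    # Posterior mean / covariance of the "branin" metric at that point.
    means, covariances = model_bridge_with_GPEI.predict(obs_feats)

    # Acquisition value (qLogNoisyExpectedImprovement by default) at the same point.
    acqf_values = model_bridge_with_GPEI.evaluate_acquisition_function(
        observation_features=obs_feats
    )
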
@@ -235,49 +228,51 @@ }, { "cell_type": "code", + "execution_count": 4, "metadata": { + "collapsed": false, + "executionStartTime": 1730916299852, + "executionStopTime": 1730916300305, + "id": "consecutive-summary", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "59582fc6-8089-4320-864e-d98ee271d4f7" }, - "id": "consecutive-summary", "originalKey": "f64e9d2e-bfd4-47da-8292-dbe7e70cbe1f", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916299852, - "executionStopTime": 1730916300305, - "serverExecutionDuration": 233.20194100961, - "collapsed": false, - "requestMsgId": "f64e9d2e-bfd4-47da-8292-dbe7e70cbe1f" + "requestMsgId": "f64e9d2e-bfd4-47da-8292-dbe7e70cbe1f", + "serverExecutionDuration": 233.20194100961 }, - "source": [ - "generator_run = model_bridge_with_GPEI.gen(n=1)\n", - "generator_run.arms[0]" - ], - "execution_count": 4, "outputs": [ { - "output_type": "execute_result", "data": { - "text/plain": "Arm(parameters={'x1': 10.0, 'x2': 15.0})" + "text/plain": [ + "Arm(parameters={'x1': 10.0, 'x2': 15.0})" + ] }, + "execution_count": 4, "metadata": {}, - "execution_count": 4 + "output_type": "execute_result" } + ], + "source": [ + "generator_run = model_bridge_with_GPEI.gen(n=1)\n", + "generator_run.arms[0]" ] }, { "cell_type": "markdown", "metadata": { + "id": "diverse-richards", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "8cfe0fa9-8cce-4718-ba43-e8a63744d626" }, - "id": "diverse-richards", "originalKey": "804bac30-db07-4444-98a2-7a5f05007495", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "-----\n", @@ -290,15 +285,15 @@ { "cell_type": "markdown", "metadata": { + "id": "grand-committee", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "7037fd14-bcfe-44f9-b915-c23915d2bda9" }, - "id": "grand-committee", "originalKey": "31b54ce5-2590-4617-b10c-d24ed3cce51d", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 2. BoTorchModel = Surrogate + Acquisition\n", @@ -309,15 +304,15 @@ { "cell_type": "markdown", "metadata": { + "id": "thousand-blanket", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "08b12c6c-14da-4342-95bd-f607a131ce9d" }, - "id": "thousand-blanket", "originalKey": "4a4e006e-07fa-4d63-8b9a-31b67075e40e", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "### 2A. 
Example that uses defaults and requires no options\n", @@ -327,21 +322,23 @@ }, { "cell_type": "code", + "execution_count": 5, "metadata": { + "collapsed": false, + "executionStartTime": 1730916302730, + "executionStopTime": 1730916304031, + "id": "changing-xerox", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "b1bca702-07b2-4818-b2b9-2107268c383c" }, - "id": "changing-xerox", "originalKey": "fa86552a-0b80-4040-a0c4-61a0de37bdc1", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916302730, - "executionStopTime": 1730916304031, - "serverExecutionDuration": 1.7747740494087, - "collapsed": false, - "requestMsgId": "fa86552a-0b80-4040-a0c4-61a0de37bdc1" + "requestMsgId": "fa86552a-0b80-4040-a0c4-61a0de37bdc1", + "serverExecutionDuration": 1.7747740494087 }, + "outputs": [], "source": [ "# The surrogate is not specified, so it will be auto-selected\n", "# during `model.fit`.\n", @@ -357,22 +354,20 @@ "\n", "# Both the surrogate and acquisition class will be auto-selected.\n", "GPEI_model = BoTorchModel()" - ], - "execution_count": 5, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { + "id": "lovely-mechanics", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "5cec0f06-ae2c-47d3-bd95-441c45762e38" }, - "id": "lovely-mechanics", "originalKey": "7b9fae38-fe5d-4e5b-8b5f-2953c1ef09d2", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "### 2B. Example with all the options\n", @@ -381,21 +376,23 @@ }, { "cell_type": "code", + "execution_count": 6, "metadata": { + "collapsed": false, + "executionStartTime": 1730916305930, + "executionStopTime": 1730916306168, + "id": "twenty-greek", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "25b13c48-edb0-4b3f-ba34-4f4a4176162a" }, - "id": "twenty-greek", "originalKey": "8d824e37-b087-4bab-9b16-4354e9509df7", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916305930, - "executionStopTime": 1730916306168, - "serverExecutionDuration": 2.6916969800368, - "collapsed": false, - "requestMsgId": "8d824e37-b087-4bab-9b16-4354e9509df7" + "requestMsgId": "8d824e37-b087-4bab-9b16-4354e9509df7", + "serverExecutionDuration": 2.6916969800368 }, + "outputs": [], "source": [ "model = BoTorchModel(\n", " # Optional `Surrogate` specification to use instead of default\n", @@ -425,22 +422,20 @@ " refit_on_cv=False,\n", " warm_start_refit=True,\n", ")" - ], - "execution_count": 6, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { + "id": "fourth-material", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "db0feafe-8af9-40a3-9f67-72c7d1fd808e" }, - "id": "fourth-material", "originalKey": "7140bb19-09b4-4abe-951d-53902ae07833", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 2C. 
`Surrogate` and `Acquisition` Q&A\n", @@ -455,15 +450,15 @@ { "cell_type": "markdown", "metadata": { + "id": "violent-course", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "86018ee5-f7b8-41ae-8e2d-460fe5f0c15b" }, - "id": "violent-course", "originalKey": "71f92895-874d-4fc7-ae87-a5519b18d1a0", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 3. I know which Botorch `Model` and `AcquisitionFunction` I'd like to combine in Ax. How do set this up?" @@ -472,18 +467,18 @@ { "cell_type": "markdown", "metadata": { + "id": "unlike-football", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "code_folding": [], "hidden_ranges": [], "originalKey": "b29a846d-d7bc-4143-8318-10170c9b4298", "showInput": false }, - "id": "unlike-football", "originalKey": "4af8afa2-5056-46be-b7b9-428127e668cc", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "### 3a. Making a `Surrogate` from BoTorch `Model`:\n", @@ -496,23 +491,25 @@ }, { "cell_type": "code", + "execution_count": 7, "metadata": { + "collapsed": false, + "executionStartTime": 1730916308518, + "executionStopTime": 1730916308769, + "id": "dynamic-university", + "isAgentGenerated": false, + "language": "python", "metadata": { "code_folding": [], "hidden_ranges": [], "originalKey": "6c2ea955-c7a4-42ff-a4d7-f787113d4d53" }, - "id": "dynamic-university", "originalKey": "746fc2a3-0e0e-4ab4-84d9-32434eb1fc34", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916308518, - "executionStopTime": 1730916308769, - "serverExecutionDuration": 2.4644429795444, - "collapsed": false, - "requestMsgId": "746fc2a3-0e0e-4ab4-84d9-32434eb1fc34" + "requestMsgId": "746fc2a3-0e0e-4ab4-84d9-32434eb1fc34", + "serverExecutionDuration": 2.4644429795444 }, + "outputs": [], "source": [ "from botorch.models.model import Model\n", "from botorch.utils.datasets import SupervisedDataset\n", @@ -545,22 +542,20 @@ " )\n", " ]\n", ")" - ], - "execution_count": 7, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { + "id": "otherwise-context", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "b9072296-956d-4add-b1f6-e7e0415ba65c" }, - "id": "otherwise-context", "originalKey": "5a27fd2c-4c4c-41fe-a634-f6d0ec4f1666", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "NOTE: if you run into a case where base `Surrogate` does not work with your BoTorch `Model`, please let us know in this Github issue: https://github.com/facebook/Ax/issues/363, so we can find the right solution and augment this tutorial." @@ -569,15 +564,15 @@ { "cell_type": "markdown", "metadata": { + "id": "northern-invite", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "335cabdf-2bf6-48e8-ba0c-1404a8ef47f9" }, - "id": "northern-invite", "originalKey": "df06d02b-95cb-4d34-aac6-773231f1a129", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "### 3B. 
Using an arbitrary BoTorch `AcquisitionFunction` in Ax" @@ -586,18 +581,18 @@ { "cell_type": "markdown", "metadata": { + "id": "surrounded-denial", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "code_folding": [], "hidden_ranges": [], "originalKey": "e3f0c788-2131-4116-9518-4ae7daeb991f", "showInput": false }, - "id": "surrounded-denial", "originalKey": "d4861847-b757-4fcd-9f35-ba258080812c", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Steps to set up any `AcquisitionFunction` in Ax are:\n", @@ -609,24 +604,37 @@ }, { "cell_type": "code", + "execution_count": 8, "metadata": { + "collapsed": false, + "customOutput": null, + "executionStartTime": 1730916310518, + "executionStopTime": 1730916310772, + "id": "interested-search", + "isAgentGenerated": false, + "language": "python", "metadata": { "code_folding": [], "hidden_ranges": [], "originalKey": "6967ce3e-929b-4d9a-8cd1-72bf94f0be3a" }, - "id": "interested-search", "originalKey": "f188f40b-64ba-4b0c-b216-f3dea8c7465e", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916310518, - "executionStopTime": 1730916310772, - "serverExecutionDuration": 4.9752569757402, - "collapsed": false, "requestMsgId": "f188f40b-64ba-4b0c-b216-f3dea8c7465e", - "customOutput": null + "serverExecutionDuration": 4.9752569757402 }, + "outputs": [ + { + "data": { + "text/plain": [ + "BoTorchModel" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "from ax.models.torch.botorch_modular.optimizer_argparse import optimizer_argparse\n", "from botorch.acquisition.acquisition import AcquisitionFunction\n", @@ -663,31 +671,20 @@ " \"optimizer_options\": {\"sequential\": False},\n", " },\n", ")" - ], - "execution_count": 8, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": "BoTorchModel" - }, - "metadata": {}, - "execution_count": 8 - } ] }, { "cell_type": "markdown", "metadata": { + "id": "metallic-imaging", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "29256ab1-f214-4604-a423-4c7b4b36baa0" }, - "id": "metallic-imaging", "originalKey": "b057722d-b8ca-47dd-b2c8-1ff4a71c4863", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "See section 2A for combining the resulting `Surrogate` instance and `Acquisition` type into a `BoTorchModel`. You can also leverage `Models.BOTORCH_MODULAR` for ease of use; more on it in section 4 below or in section 1 quick-start example." @@ -696,15 +693,15 @@ { "cell_type": "markdown", "metadata": { + "id": "descending-australian", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "1d15082f-1df7-4cdb-958b-300483eb7808" }, - "id": "descending-australian", "originalKey": "a7406f13-1468-487d-ac5e-7d2a45394850", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 4. 
Using `Models.BOTORCH_MODULAR` \n", @@ -716,123 +713,129 @@ }, { "cell_type": "code", + "execution_count": 9, "metadata": { + "collapsed": false, + "executionStartTime": 1730916311983, + "executionStopTime": 1730916312395, + "id": "attached-border", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "385b2f30-fd86-4d88-8784-f238ea8a6abb" }, - "id": "attached-border", "originalKey": "052cf2e4-8de0-4ec3-a3f9-478194b10928", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916311983, - "executionStopTime": 1730916312395, - "serverExecutionDuration": 202.78578903526, - "collapsed": false, - "requestMsgId": "052cf2e4-8de0-4ec3-a3f9-478194b10928" + "requestMsgId": "052cf2e4-8de0-4ec3-a3f9-478194b10928", + "serverExecutionDuration": 202.78578903526 }, - "source": [ - "model_bridge_with_GPEI = Models.BOTORCH_MODULAR(\n", - " experiment=experiment,\n", - " data=data,\n", - ")\n", - "model_bridge_with_GPEI.gen(1)" - ], - "execution_count": 9, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:12] ax.modelbridge.transforms.standardize_y: Outcome branin is constant, within tolerance.\n" ] }, { - "output_type": "execute_result", "data": { - "text/plain": "GeneratorRun(1 arms, total weight 1.0)" + "text/plain": [ + "GeneratorRun(1 arms, total weight 1.0)" + ] }, + "execution_count": 9, "metadata": {}, - "execution_count": 9 + "output_type": "execute_result" } + ], + "source": [ + "model_bridge_with_GPEI = Models.BOTORCH_MODULAR(\n", + " experiment=experiment,\n", + " data=data,\n", + ")\n", + "model_bridge_with_GPEI.gen(1)" ] }, { "cell_type": "code", + "execution_count": 10, "metadata": { + "collapsed": false, + "executionStartTime": 1730916312432, + "executionStopTime": 1730916312657, + "id": "powerful-gamma", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "89930a31-e058-434b-b587-181931e247b6" }, - "id": "powerful-gamma", "originalKey": "b7f924fe-f3d9-4211-b402-421f4c90afe5", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916312432, - "executionStopTime": 1730916312657, - "serverExecutionDuration": 3.1334219966084, - "collapsed": false, - "requestMsgId": "b7f924fe-f3d9-4211-b402-421f4c90afe5" + "requestMsgId": "b7f924fe-f3d9-4211-b402-421f4c90afe5", + "serverExecutionDuration": 3.1334219966084 }, - "source": [ - "model_bridge_with_GPEI.model.botorch_acqf_class" - ], - "execution_count": 10, "outputs": [ { - "output_type": "execute_result", "data": { - "text/plain": "botorch.acquisition.logei.qLogNoisyExpectedImprovement" + "text/plain": [ + "botorch.acquisition.logei.qLogNoisyExpectedImprovement" + ] }, + "execution_count": 10, "metadata": {}, - "execution_count": 10 + "output_type": "execute_result" } + ], + "source": [ + "model_bridge_with_GPEI.model.botorch_acqf_class" ] }, { "cell_type": "code", + "execution_count": 11, "metadata": { + "collapsed": false, + "executionStartTime": 1730916312847, + "executionStopTime": 1730916313093, + "id": "improved-replication", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "f9a9cb14-20c3-4e1d-93a3-6a35c281ae01" }, - "id": "improved-replication", "originalKey": "942f1817-8d40-48f8-8725-90c25a079e4c", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916312847, - "executionStopTime": 1730916313093, - 
"serverExecutionDuration": 3.410067060031, - "collapsed": false, - "requestMsgId": "942f1817-8d40-48f8-8725-90c25a079e4c" + "requestMsgId": "942f1817-8d40-48f8-8725-90c25a079e4c", + "serverExecutionDuration": 3.410067060031 }, - "source": [ - "model_bridge_with_GPEI.model.surrogate.model.__class__" - ], - "execution_count": 11, "outputs": [ { - "output_type": "execute_result", "data": { - "text/plain": "botorch.models.gp_regression.SingleTaskGP" + "text/plain": [ + "botorch.models.gp_regression.SingleTaskGP" + ] }, + "execution_count": 11, "metadata": {}, - "execution_count": 11 + "output_type": "execute_result" } + ], + "source": [ + "model_bridge_with_GPEI.model.surrogate.model.__class__" ] }, { "cell_type": "markdown", "metadata": { + "id": "connected-sheet", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "8b6a9ddc-d2d2-4cd5-a6a8-820113f78262" }, - "id": "connected-sheet", "originalKey": "f5c0adbd-00a6-428d-810f-1e7ed0954b08", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "We can use the same `Models.BOTORCH_MODULAR` to set up a model for multi-objective optimization:" @@ -840,139 +843,145 @@ }, { "cell_type": "code", + "execution_count": 12, "metadata": { + "collapsed": false, + "executionStartTime": 1730916314009, + "executionStopTime": 1730916314736, + "id": "documentary-jurisdiction", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "8001de33-d9d9-4888-a5d1-7a59ebeccfd5" }, - "id": "documentary-jurisdiction", "originalKey": "9c64c497-f663-42a6-aa48-1f1f2ae2b80b", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916314009, - "executionStopTime": 1730916314736, - "serverExecutionDuration": 518.53136904538, - "collapsed": false, - "requestMsgId": "9c64c497-f663-42a6-aa48-1f1f2ae2b80b" + "requestMsgId": "9c64c497-f663-42a6-aa48-1f1f2ae2b80b", + "serverExecutionDuration": 518.53136904538 }, - "source": [ - "model_bridge_with_EHVI = Models.BOTORCH_MODULAR(\n", - " experiment=get_branin_experiment_with_multi_objective(\n", - " has_objective_thresholds=True, with_batch=True\n", - " ),\n", - " data=get_branin_data_multi_objective(),\n", - ")\n", - "model_bridge_with_EHVI.gen(1)" - ], - "execution_count": 12, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:14] ax.core.experiment: The is_test flag has been set to True. This flag is meant purely for development and integration testing purposes. 
If you are running a live experiment, please set this flag to False\n" ] }, { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:14] ax.modelbridge.transforms.standardize_y: Outcome branin_a is constant, within tolerance.\n" ] }, { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:14] ax.modelbridge.transforms.standardize_y: Outcome branin_b is constant, within tolerance.\n" ] }, { - "output_type": "execute_result", "data": { - "text/plain": "GeneratorRun(1 arms, total weight 1.0)" + "text/plain": [ + "GeneratorRun(1 arms, total weight 1.0)" + ] }, + "execution_count": 12, "metadata": {}, - "execution_count": 12 + "output_type": "execute_result" } + ], + "source": [ + "model_bridge_with_EHVI = Models.BOTORCH_MODULAR(\n", + " experiment=get_branin_experiment_with_multi_objective(\n", + " has_objective_thresholds=True, with_batch=True\n", + " ),\n", + " data=get_branin_data_multi_objective(),\n", + ")\n", + "model_bridge_with_EHVI.gen(1)" ] }, { "cell_type": "code", + "execution_count": 13, "metadata": { + "collapsed": false, + "executionStartTime": 1730916314586, + "executionStopTime": 1730916314842, + "id": "changed-maintenance", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "dcfdbecc-4a9a-49ac-ad55-0bc04b2ec566" }, - "id": "changed-maintenance", "originalKey": "ab6e84ac-2a55-4f48-9ab7-06b8d9b58d1f", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916314586, - "executionStopTime": 1730916314842, - "serverExecutionDuration": 3.3097150735557, - "collapsed": false, - "requestMsgId": "ab6e84ac-2a55-4f48-9ab7-06b8d9b58d1f" + "requestMsgId": "ab6e84ac-2a55-4f48-9ab7-06b8d9b58d1f", + "serverExecutionDuration": 3.3097150735557 }, - "source": [ - "model_bridge_with_EHVI.model.botorch_acqf_class" - ], - "execution_count": 13, "outputs": [ { - "output_type": "execute_result", "data": { - "text/plain": "botorch.acquisition.multi_objective.logei.qLogNoisyExpectedHypervolumeImprovement" + "text/plain": [ + "botorch.acquisition.multi_objective.logei.qLogNoisyExpectedHypervolumeImprovement" + ] }, + "execution_count": 13, "metadata": {}, - "execution_count": 13 + "output_type": "execute_result" } + ], + "source": [ + "model_bridge_with_EHVI.model.botorch_acqf_class" ] }, { "cell_type": "code", + "execution_count": 14, "metadata": { + "collapsed": false, + "executionStartTime": 1730916315097, + "executionStopTime": 1730916315308, + "id": "operating-shelf", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "16727a51-337d-4715-bf51-9cb6637a950f" }, - "id": "operating-shelf", "originalKey": "1e980e3c-09f6-44c1-a79f-f59867de0c3e", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916315097, - "executionStopTime": 1730916315308, - "serverExecutionDuration": 3.4662369871512, - "collapsed": false, - "requestMsgId": "1e980e3c-09f6-44c1-a79f-f59867de0c3e" + "requestMsgId": "1e980e3c-09f6-44c1-a79f-f59867de0c3e", + "serverExecutionDuration": 3.4662369871512 }, - "source": [ - "model_bridge_with_EHVI.model.surrogate.model.__class__" - ], - "execution_count": 14, "outputs": [ { - "output_type": "execute_result", "data": { - "text/plain": "botorch.models.gp_regression.SingleTaskGP" + "text/plain": [ + "botorch.models.gp_regression.SingleTaskGP" + ] }, + "execution_count": 14, "metadata": {}, - "execution_count": 14 + "output_type": 
"execute_result" } + ], + "source": [ + "model_bridge_with_EHVI.model.surrogate.model.__class__" ] }, { "cell_type": "markdown", "metadata": { + "id": "fatal-butterfly", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "5c64eecc-5ce5-4907-bbcc-5b3cbf4358ae" }, - "id": "fatal-butterfly", "originalKey": "3ad7c4a7-fe19-44ad-938d-1be4f8b09bfb", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Furthermore, the quick-start example at the top of this tutorial shows how to specify surrogate and acquisition subcomponents to `Models.BOTORCH_MODULAR`. " @@ -981,15 +990,15 @@ { "cell_type": "markdown", "metadata": { + "id": "hearing-interface", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "a0163432-f0ca-4582-ad84-16c77c99f20b" }, - "id": "hearing-interface", "originalKey": "44adf1ce-6d3e-455d-b53c-32d3c42a843f", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 5. Utilizing `BoTorchModel` in generation strategies\n", @@ -1001,21 +1010,23 @@ }, { "cell_type": "code", + "execution_count": 15, "metadata": { + "collapsed": false, + "executionStartTime": 1730916316730, + "executionStopTime": 1730916316968, + "id": "received-registration", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "f7eabbcf-607c-4bed-9a0e-6ac6e8b04350" }, - "id": "received-registration", "originalKey": "4ee172c8-0648-418b-9968-647e8e916507", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916316730, - "executionStopTime": 1730916316968, - "serverExecutionDuration": 2.2927720565349, - "collapsed": false, - "requestMsgId": "4ee172c8-0648-418b-9968-647e8e916507" + "requestMsgId": "4ee172c8-0648-418b-9968-647e8e916507", + "serverExecutionDuration": 2.2927720565349 }, + "outputs": [], "source": [ "from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy\n", "from ax.modelbridge.modelbridge_utils import get_pending_observation_features\n", @@ -1045,22 +1056,20 @@ " ),\n", " ]\n", ")" - ], - "execution_count": 15, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { + "id": "logical-windsor", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "212c4543-220e-4605-8f72-5f86cf52f722" }, - "id": "logical-windsor", "originalKey": "ba3783ee-3d88-4e44-ad07-77de3c50f84d", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Set up an experiment and generate 10 trials in it, adding synthetic data to experiment after each one:" @@ -1068,58 +1077,60 @@ }, { "cell_type": "code", + "execution_count": 16, "metadata": { + "collapsed": false, + "executionStartTime": 1730916317751, + "executionStopTime": 1730916318153, + "id": "viral-cheese", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "30cfcdd7-721d-4f89-b851-7a94140dfad6" }, - "id": "viral-cheese", "originalKey": "1b7d0cfc-f7cf-477d-b109-d34db9604938", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916317751, - "executionStopTime": 1730916318153, - "serverExecutionDuration": 3.9581339806318, - "collapsed": false, - "requestMsgId": "1b7d0cfc-f7cf-477d-b109-d34db9604938" + "requestMsgId": 
"1b7d0cfc-f7cf-477d-b109-d34db9604938", + "serverExecutionDuration": 3.9581339806318 }, - "source": [ - "experiment = get_branin_experiment(minimize=True)\n", - "\n", - "assert len(experiment.trials) == 0\n", - "experiment.search_space" - ], - "execution_count": 16, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:18] ax.core.experiment: The is_test flag has been set to True. This flag is meant purely for development and integration testing purposes. If you are running a live experiment, please set this flag to False\n" ] }, { - "output_type": "execute_result", "data": { - "text/plain": "SearchSpace(parameters=[RangeParameter(name='x1', parameter_type=FLOAT, range=[-5.0, 10.0]), RangeParameter(name='x2', parameter_type=FLOAT, range=[0.0, 15.0])], parameter_constraints=[])" + "text/plain": [ + "SearchSpace(parameters=[RangeParameter(name='x1', parameter_type=FLOAT, range=[-5.0, 10.0]), RangeParameter(name='x2', parameter_type=FLOAT, range=[0.0, 15.0])], parameter_constraints=[])" + ] }, + "execution_count": 16, "metadata": {}, - "execution_count": 16 + "output_type": "execute_result" } + ], + "source": [ + "experiment = get_branin_experiment(minimize=True)\n", + "\n", + "assert len(experiment.trials) == 0\n", + "experiment.search_space" ] }, { "cell_type": "markdown", "metadata": { + "id": "incident-newspaper", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "2807d7ce-8a6b-423c-b5f5-32edba09c78e" }, - "id": "incident-newspaper", "originalKey": "df2e90f5-4132-4d87-989b-e6d47c748ddc", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 5a. Specifying `pending_observations`\n", @@ -1130,98 +1141,102 @@ }, { "cell_type": "code", + "execution_count": 17, "metadata": { + "collapsed": false, + "executionStartTime": 1730916318830, + "executionStopTime": 1730916321328, + "id": "casual-spread", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "58aafd65-a366-4b66-a1b1-31b207037a2e" }, - "id": "casual-spread", "originalKey": "fe7437c5-8834-46cc-94b2-91782d91ee96", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916318830, - "executionStopTime": 1730916321328, - "serverExecutionDuration": 2274.8276960338, - "collapsed": false, - "requestMsgId": "fe7437c5-8834-46cc-94b2-91782d91ee96" + "requestMsgId": "fe7437c5-8834-46cc-94b2-91782d91ee96", + "serverExecutionDuration": 2274.8276960338 }, - "source": [ - "for _ in range(10):\n", - " # Produce a new generator run and attach it to experiment as a trial\n", - " generator_run = gs.gen(\n", - " experiment=experiment,\n", - " n=1,\n", - " pending_observations=get_pending_observation_features(experiment=experiment),\n", - " )\n", - " trial = experiment.new_trial(generator_run)\n", - "\n", - " # Mark the trial as 'RUNNING' so we can mark it 'COMPLETED' later\n", - " trial.mark_running(no_runner_required=True)\n", - "\n", - " # Attach data for the new trial and mark it 'COMPLETED'\n", - " experiment.attach_data(get_branin_data(trials=[trial]))\n", - " trial.mark_completed()\n", - "\n", - " print(f\"Completed trial #{trial.index}, suggested by {generator_run._model_key}.\")" - ], - "execution_count": 17, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ - "Completed trial #0, suggested by Sobol.\nCompleted trial #1, suggested by 
Sobol.\nCompleted trial #2, suggested by Sobol.\nCompleted trial #3, suggested by Sobol.\nCompleted trial #4, suggested by Sobol.\n" + "Completed trial #0, suggested by Sobol.\n", + "Completed trial #1, suggested by Sobol.\n", + "Completed trial #2, suggested by Sobol.\n", + "Completed trial #3, suggested by Sobol.\n", + "Completed trial #4, suggested by Sobol.\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Completed trial #5, suggested by BoTorch.\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Completed trial #6, suggested by BoTorch.\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Completed trial #7, suggested by BoTorch.\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Completed trial #8, suggested by BoTorch.\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Completed trial #9, suggested by BoTorch.\n" ] } + ], + "source": [ + "for _ in range(10):\n", + " # Produce a new generator run and attach it to experiment as a trial\n", + " generator_run = gs.gen(\n", + " experiment=experiment,\n", + " n=1,\n", + " pending_observations=get_pending_observation_features(experiment=experiment),\n", + " )\n", + " trial = experiment.new_trial(generator_run)\n", + "\n", + " # Mark the trial as 'RUNNING' so we can mark it 'COMPLETED' later\n", + " trial.mark_running(no_runner_required=True)\n", + "\n", + " # Attach data for the new trial and mark it 'COMPLETED'\n", + " experiment.attach_data(get_branin_data(trials=[trial]))\n", + " trial.mark_completed()\n", + "\n", + " print(f\"Completed trial #{trial.index}, suggested by {generator_run._model_key}.\")" ] }, { "cell_type": "markdown", "metadata": { + "id": "circular-vermont", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "9d3b86bf-b691-4315-8b8f-60504b37818c" }, - "id": "circular-vermont", "originalKey": "6a78ef13-fbaa-4cae-934b-d57f5807fe25", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Now we examine the experiment and observe the trials that were added to it and produced by the generation strategy:" @@ -1229,200 +1244,346 @@ }, { "cell_type": "code", + "execution_count": 18, "metadata": { + "collapsed": false, + "executionStartTime": 1730916319576, + "executionStopTime": 1730916321368, + "id": "significant-particular", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "ca12913d-e3fd-4617-a247-e3432665bac1" }, - "id": "significant-particular", "originalKey": "b3160bc0-d5d1-45fa-bf62-4b9dd5778cac", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916319576, - "executionStopTime": 1730916321368, - "serverExecutionDuration": 35.789265064523, - "collapsed": false, - "requestMsgId": "b3160bc0-d5d1-45fa-bf62-4b9dd5778cac" + "requestMsgId": "b3160bc0-d5d1-45fa-bf62-4b9dd5778cac", + "serverExecutionDuration": 35.789265064523 }, - "source": [ - "exp_to_df(experiment)" - ], - "execution_count": 18, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[WARNING 11-06 10:05:21] ax.service.utils.report_utils: Column reason missing for all trials. Not appending column.\n" ] }, { - "output_type": "execute_result", "data": { - "text/plain": " trial_index arm_name trial_status ... 
branin x1 x2\n0 0 0_0 COMPLETED ... 26.922506 -2.244023 5.435609\n1 1 1_0 COMPLETED ... 74.072517 3.535081 10.528676\n2 2 2_0 COMPLETED ... 5.610080 8.741262 3.706691\n3 3 3_0 COMPLETED ... 56.657623 -0.069164 12.199905\n4 4 4_0 COMPLETED ... 27.932704 0.862014 1.306074\n5 5 5_0 COMPLETED ... 5.423062 10.000000 4.868411\n6 6 6_0 COMPLETED ... 9.250452 10.000000 0.299753\n7 7 7_0 COMPLETED ... 308.129096 -5.000000 0.000000\n8 8 8_0 COMPLETED ... 17.607633 0.778687 5.717932\n9 9 9_0 COMPLETED ... 132.986209 1.451895 15.000000\n\n[10 rows x 7 columns]", - "text/html": "
", "application/vnd.dataresource+json": { - "schema": { - "fields": [ - { - "name": "index", - "type": "integer" - }, - { - "name": "trial_index", - "type": "integer" - }, - { - "name": "arm_name", - "type": "string" - }, - { - "name": "trial_status", - "type": "string" - }, - { - "name": "generation_method", - "type": "string" - }, - { - "name": "branin", - "type": "number" - }, - { - "name": "x1", - "type": "number" - }, - { - "name": "x2", - "type": "number" - } - ], - "primaryKey": [ - "index" - ], - "pandas_version": "1.4.0" - }, "data": [ { + "arm_name": "0_0", + "branin": 26.9225058393, + "generation_method": "Sobol", "index": 0, "trial_index": 0, - "arm_name": "0_0", "trial_status": "COMPLETED", - "generation_method": "Sobol", - "branin": 26.9225058393, "x1": -2.2440226376, "x2": 5.4356087744 }, { + "arm_name": "1_0", + "branin": 74.0725171307, + "generation_method": "Sobol", "index": 1, "trial_index": 1, - "arm_name": "1_0", "trial_status": "COMPLETED", - "generation_method": "Sobol", - "branin": 74.0725171307, "x1": 3.535081069, "x2": 10.5286756391 }, { + "arm_name": "2_0", + "branin": 5.6100798162, + "generation_method": "Sobol", "index": 2, "trial_index": 2, - "arm_name": "2_0", "trial_status": "COMPLETED", - "generation_method": "Sobol", - "branin": 5.6100798162, "x1": 8.7412616471, "x2": 3.7066908041 }, { + "arm_name": "3_0", + "branin": 56.6576230229, + "generation_method": "Sobol", "index": 3, "trial_index": 3, - "arm_name": "3_0", "trial_status": "COMPLETED", - "generation_method": "Sobol", - "branin": 56.6576230229, "x1": -0.0691637676, "x2": 12.1999046439 }, { + "arm_name": "4_0", + "branin": 27.9327040954, + "generation_method": "Sobol", "index": 4, "trial_index": 4, - "arm_name": "4_0", "trial_status": "COMPLETED", - "generation_method": "Sobol", - "branin": 27.9327040954, "x1": 0.8620139305, "x2": 1.3060741313 }, { + "arm_name": "5_0", + "branin": 5.4230616409, + "generation_method": "BoTorch", "index": 5, "trial_index": 5, - "arm_name": "5_0", "trial_status": "COMPLETED", - "generation_method": "BoTorch", - "branin": 5.4230616409, "x1": 10, "x2": 4.8684112356 }, { + "arm_name": "6_0", + "branin": 9.2504522786, + "generation_method": "BoTorch", "index": 6, "trial_index": 6, - "arm_name": "6_0", "trial_status": "COMPLETED", - "generation_method": "BoTorch", - "branin": 9.2504522786, "x1": 10, "x2": 0.2997526514 }, { + "arm_name": "7_0", + "branin": 308.1290960116, + "generation_method": "BoTorch", "index": 7, "trial_index": 7, - "arm_name": "7_0", "trial_status": "COMPLETED", - "generation_method": "BoTorch", - "branin": 308.1290960116, "x1": -5, "x2": 0 }, { + "arm_name": "8_0", + "branin": 17.6076329851, + "generation_method": "BoTorch", "index": 8, "trial_index": 8, - "arm_name": "8_0", "trial_status": "COMPLETED", - "generation_method": "BoTorch", - "branin": 17.6076329851, "x1": 0.7786866384, "x2": 5.7179317285 }, { + "arm_name": "9_0", + "branin": 132.9862090134, + "generation_method": "BoTorch", "index": 9, "trial_index": 9, - "arm_name": "9_0", "trial_status": "COMPLETED", - "generation_method": "BoTorch", - "branin": 132.9862090134, "x1": 1.451894724, "x2": 15 } - ] - } + ], + "schema": { + "fields": [ + { + "name": "index", + "type": "integer" + }, + { + "name": "trial_index", + "type": "integer" + }, + { + "name": "arm_name", + "type": "string" + }, + { + "name": "trial_status", + "type": "string" + }, + { + "name": "generation_method", + "type": "string" + }, + { + "name": "branin", + "type": "number" + }, + { + "name": "x1", + "type": "number" + }, + { + 
"name": "x2", + "type": "number" + } + ], + "pandas_version": "1.4.0", + "primaryKey": [ + "index" + ] + } + }, + "text/html": [ + "
" + ], + "text/plain": [ + " trial_index arm_name trial_status ... branin x1 x2\n", + "0 0 0_0 COMPLETED ... 26.922506 -2.244023 5.435609\n", + "1 1 1_0 COMPLETED ... 74.072517 3.535081 10.528676\n", + "2 2 2_0 COMPLETED ... 5.610080 8.741262 3.706691\n", + "3 3 3_0 COMPLETED ... 56.657623 -0.069164 12.199905\n", + "4 4 4_0 COMPLETED ... 27.932704 0.862014 1.306074\n", + "5 5 5_0 COMPLETED ... 5.423062 10.000000 4.868411\n", + "6 6 6_0 COMPLETED ... 9.250452 10.000000 0.299753\n", + "7 7 7_0 COMPLETED ... 308.129096 -5.000000 0.000000\n", + "8 8 8_0 COMPLETED ... 17.607633 0.778687 5.717932\n", + "9 9 9_0 COMPLETED ... 132.986209 1.451895 15.000000\n", + "\n", + "[10 rows x 7 columns]" + ] }, + "execution_count": 18, "metadata": {}, - "execution_count": 18 + "output_type": "execute_result" } + ], + "source": [ + "exp_to_df(experiment)" ] }, { "cell_type": "markdown", "metadata": { + "id": "obvious-transparency", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "c25da720-6d3d-4f16-b878-24f2d2755783" }, - "id": "obvious-transparency", "originalKey": "633c66af-a89f-4f03-a88b-866767d0a52f", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## 6. Customizing a `Surrogate` or `Acquisition`\n", @@ -1434,21 +1595,23 @@ }, { "cell_type": "code", + "execution_count": 19, "metadata": { + "collapsed": false, + "executionStartTime": 1730916320585, + "executionStopTime": 1730916321384, + "id": "organizational-balance", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "e7f8e413-f01e-4f9d-82c1-4912097637af" }, - "id": "organizational-balance", "originalKey": "2949718a-8a4e-41e5-91ac-5b020eface47", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916320585, - "executionStopTime": 1730916321384, - "serverExecutionDuration": 2.2059100447223, - "collapsed": false, - "requestMsgId": "2949718a-8a4e-41e5-91ac-5b020eface47" + "requestMsgId": "2949718a-8a4e-41e5-91ac-5b020eface47", + "serverExecutionDuration": 2.2059100447223 }, + "outputs": [], "source": [ "from botorch.acquisition.objective import MCAcquisitionObjective, PosteriorTransform\n", "from botorch.acquisition.risk_measures import RiskMeasureMCObjective\n", @@ -1466,22 +1629,20 @@ " risk_measure: Optional[RiskMeasureMCObjective] = None,\n", " ) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:\n", " ... 
# Produce the desired `MCAcquisitionObjective` and `PosteriorTransform` instead of the default" - ], - "execution_count": 19, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { + "id": "theoretical-horizon", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "7299f0fc-e19e-4383-99de-ef7a9a987fe9" }, - "id": "theoretical-horizon", "originalKey": "0ec8606d-9d5b-4bcb-ad7e-f54839ad6f9b", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "Then to use the new subclass in `BoTorchModel`, just specify `acquisition_class` argument along with `botorch_acqf_class` (to `BoTorchModel` directly or to `Models.BOTORCH_MODULAR`, which just passes the relevant arguments to `BoTorchModel` under the hood, as discussed in section 4):" @@ -1489,60 +1650,62 @@ }, { "cell_type": "code", + "execution_count": 20, "metadata": { + "collapsed": false, + "executionStartTime": 1730916321675, + "executionStopTime": 1730916321901, + "id": "approximate-rolling", + "isAgentGenerated": false, + "language": "python", "metadata": { "originalKey": "07fe169a-78de-437e-9857-7c99cc48eedc" }, - "id": "approximate-rolling", "originalKey": "e231ea1e-c70d-48dc-b6c6-1611c5ea1b26", "outputsInitialized": true, - "isAgentGenerated": false, - "language": "python", - "executionStartTime": 1730916321675, - "executionStopTime": 1730916321901, - "serverExecutionDuration": 12.351316981949, - "collapsed": false, - "requestMsgId": "e231ea1e-c70d-48dc-b6c6-1611c5ea1b26" + "requestMsgId": "e231ea1e-c70d-48dc-b6c6-1611c5ea1b26", + "serverExecutionDuration": 12.351316981949 }, - "source": [ - "Models.BOTORCH_MODULAR(\n", - " experiment=experiment,\n", - " data=data,\n", - " acquisition_class=CustomObjectiveAcquisition,\n", - " botorch_acqf_class=MyAcquisitionFunctionClass,\n", - ")" - ], - "execution_count": 20, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "[INFO 11-06 10:05:21] ax.modelbridge.transforms.standardize_y: Outcome branin is constant, within tolerance.\n" ] }, { - "output_type": "execute_result", "data": { - "text/plain": "TorchModelBridge(model=BoTorchModel)" + "text/plain": [ + "TorchModelBridge(model=BoTorchModel)" + ] }, + "execution_count": 20, "metadata": {}, - "execution_count": 20 + "output_type": "execute_result" } + ], + "source": [ + "Models.BOTORCH_MODULAR(\n", + " experiment=experiment,\n", + " data=data,\n", + " acquisition_class=CustomObjectiveAcquisition,\n", + " botorch_acqf_class=MyAcquisitionFunctionClass,\n", + ")" ] }, { "cell_type": "markdown", "metadata": { + "id": "representative-implement", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "608d5f0d-4528-4aa6-869d-db38fcbfb256" }, - "id": "representative-implement", "originalKey": "cdcfb2bc-3016-4681-9fff-407f28321c3f", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "To use a custom `Surrogate` subclass, pass the `surrogate` argument of that type:\n", @@ -1558,15 +1721,15 @@ { "cell_type": "markdown", "metadata": { + "id": "framed-intermediate", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "64f1289e-73c7-4cc5-96ee-5091286a8361" }, - "id": "framed-intermediate", "originalKey": "ff03d674-f584-403f-ba65-f1bab921845b", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": 
"markdown" + "showInput": false }, "source": [ "------" @@ -1575,15 +1738,15 @@ { "cell_type": "markdown", "metadata": { + "id": "metropolitan-feedback", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "d1e37569-dd0d-4561-b890-2f0097a345e0" }, - "id": "metropolitan-feedback", "originalKey": "f71fcfa1-fc59-4bfb-84d6-b94ea5298bfa", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## Appendix 1: Methods available on `BoTorchModel`\n", @@ -1599,22 +1762,21 @@ "* `update` updates surrogate model with training data and optionally reoptimizes model parameters via `Surrogate.update`,\n", "* `cross_validate` re-fits the surrogate model to subset of training data and makes predictions for test data,\n", "* `evaluate_acquisition_function` instantiates an acquisition function and evaluates it for a given point.\n", - "------\n", - "" + "------\n" ] }, { "cell_type": "markdown", "metadata": { + "id": "possible-transsexual", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "b02f928c-57d9-4b2a-b4fe-c6d28d368b12" }, - "id": "possible-transsexual", "originalKey": "91cedde4-8911-441f-af05-eb124581cbbc", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## Appendix 2: Default surrogate models and acquisition functions\n", @@ -1633,15 +1795,15 @@ { "cell_type": "markdown", "metadata": { + "id": "continuous-strain", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "76ae9852-9d21-43d6-bf75-bb087a474dd6" }, - "id": "continuous-strain", "originalKey": "c8b0f933-8df6-479b-aa61-db75ca877624", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "## Appendix 3: Handling storage errors that arise from objects that don't have serialization logic in A\n", @@ -1652,15 +1814,15 @@ { "cell_type": "markdown", "metadata": { + "id": "broadband-voice", + "isAgentGenerated": false, + "language": "markdown", "metadata": { "originalKey": "6487b68e-b808-4372-b6ba-ab02ce4826bc" }, - "id": "broadband-voice", "originalKey": "4d82f49a-3a8b-42f0-a4f5-5c079b793344", - "showInput": false, "outputsInitialized": false, - "isAgentGenerated": false, - "language": "markdown" + "showInput": false }, "source": [ "The two options for handling this error are:\n", @@ -1668,5 +1830,26 @@ "2. specifying serialization logic for a given object that needs to occur among the `Model` or `AcquisitionFunction` options. Tutorial for this is in the works, but in the meantime you can [post an issue on the Ax GitHub](https://github.com/facebook/Ax/issues) to get help with this." 
] } - ] + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/tutorials/multi_task.ipynb b/tutorials/multi_task/multi_task.ipynb similarity index 95% rename from tutorials/multi_task.ipynb rename to tutorials/multi_task/multi_task.ipynb index 4e1559430f6..c543120a8c1 100644 --- a/tutorials/multi_task.ipynb +++ b/tutorials/multi_task/multi_task.ipynb @@ -22,16 +22,25 @@ "Throughout the optimization we can make nosiy observations directly of the objective (an online observation), and we can make noisy observations of a biased version of the objective (offline observations). Bias is simulated by passing the function values through a piecewise linear function. Offline observations are much less time-consuming than online observations, so we wish to use them to improve our ability to optimize the online objective." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": { "code_folding": [], "hidden_ranges": [], - "originalKey": "3ce827be-d20b-48d3-a6ff-291bd442c748", - "vscode": { - "languageId": "python" - } + "originalKey": "3ce827be-d20b-48d3-a6ff-291bd442c748" }, "outputs": [], "source": [ @@ -68,8 +77,11 @@ "from ax.runners.synthetic import SyntheticRunner\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", "from pyre_extensions import assert_is_instance\n", + "import plotly.io as pio\n", "\n", "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"\n", "\n", "# Transforms for pre-processing the data from a multi-type experiment to \n", "# construct a multi-task GP model.\n", @@ -85,11 +97,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "SMOKE_TEST = os.environ.get(\"SMOKE_TEST\")" @@ -114,10 +122,7 @@ "metadata": { "code_folding": [], "hidden_ranges": [], - "originalKey": "2315ca64-74e5-4084-829e-e8a482c653e5", - "vscode": { - "languageId": "python" - } + "originalKey": "2315ca64-74e5-4084-829e-e8a482c653e5" }, "outputs": [], "source": [ @@ -161,10 +166,7 @@ "metadata": { "code_folding": [], "hidden_ranges": [], - "originalKey": "39504f84-793e-4dae-ae55-068f1b762706", - "vscode": { - "languageId": "python" - } + "originalKey": "39504f84-793e-4dae-ae55-068f1b762706" }, "outputs": [], "source": [ @@ -234,10 +236,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "originalKey": "8260b668-91ef-404e-aa8c-4bf43f6a5660", - "vscode": { - "languageId": "python" - } + "originalKey": "8260b668-91ef-404e-aa8c-4bf43f6a5660" }, "outputs": [], "source": [ @@ -272,10 +271,7 @@ "cell_type": "code", "execution_count": null, "metadata": { - "originalKey": "3d124563-8a1f-411e-9822-972568ce1970", - "vscode": { - "languageId": "python" - } + "originalKey": "3d124563-8a1f-411e-9822-972568ce1970" }, "outputs": [], "source": [ @@ -313,10 +309,7 @@ "metadata": { "code_folding": [], "hidden_ranges": [], - 
"originalKey": "040354c2-4313-46db-b40d-8adc8da6fafb", - "vscode": { - "languageId": "python" - } + "originalKey": "040354c2-4313-46db-b40d-8adc8da6fafb" }, "outputs": [], "source": [ @@ -368,11 +361,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "def get_MTGP(\n", @@ -444,10 +433,7 @@ "metadata": { "code_folding": [], "hidden_ranges": [], - "originalKey": "37735b0e-e488-4927-a3da-a7d32d9f1ae0", - "vscode": { - "languageId": "python" - } + "originalKey": "37735b0e-e488-4927-a3da-a7d32d9f1ae0" }, "outputs": [], "source": [ @@ -550,10 +536,7 @@ "metadata": { "code_folding": [], "hidden_ranges": [], - "originalKey": "f94a7537-61a6-4200-8e56-01de41aff6c9", - "vscode": { - "languageId": "python" - } + "originalKey": "f94a7537-61a6-4200-8e56-01de41aff6c9" }, "outputs": [], "source": [ diff --git a/tutorials/multiobjective_optimization.ipynb b/tutorials/multiobjective_optimization/multiobjective_optimization.ipynb similarity index 99% rename from tutorials/multiobjective_optimization.ipynb rename to tutorials/multiobjective_optimization/multiobjective_optimization.ipynb index c52e040f982..6bff2384850 100644 --- a/tutorials/multiobjective_optimization.ipynb +++ b/tutorials/multiobjective_optimization/multiobjective_optimization.ipynb @@ -18,6 +18,18 @@ "To learn more about how to choose a threshold, see [Set Objective Thresholds to focus candidate generation in a region of interest](#Set-Objective-Thresholds-to-focus-candidate-generation-in-a-region-of-interest). See the [Service API Tutorial](/tutorials/gpei_hartmann_service.html) for more infomation on running experiments with the Service API." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -39,8 +51,11 @@ "# Plotting imports and initialization\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", "from botorch.test_functions.multi_objective import BraninCurrin\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/raytune_pytorch_cnn.ipynb b/tutorials/raytune_pytorch_cnn/raytune_pytorch_cnn.ipynb similarity index 96% rename from tutorials/raytune_pytorch_cnn.ipynb rename to tutorials/raytune_pytorch_cnn/raytune_pytorch_cnn.ipynb index 805e7b6fdd8..c71b9537c9b 100644 --- a/tutorials/raytune_pytorch_cnn.ipynb +++ b/tutorials/raytune_pytorch_cnn/raytune_pytorch_cnn.ipynb @@ -16,6 +16,18 @@ "Ray 'Actors' are a simple and clean abstraction for replicating your Python classes across multiple workers and nodes. Each hyperparameter evaluation is asynchronously executed on a separate Ray actor and reports intermediate training progress back to RayTune. Upon reporting, RayTune then uses this information to performs actions such as early termination, re-prioritization, or checkpointing." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -51,8 +63,11 @@ "from ax.service.ax_client import AxClient\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", "from ax.utils.tutorials.cnn_utils import CNN, evaluate, load_mnist, train\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/saasbo.ipynb b/tutorials/saasbo/saasbo.ipynb similarity index 97% rename from tutorials/saasbo.ipynb rename to tutorials/saasbo/saasbo.ipynb index 875415cde0e..0973d869817 100644 --- a/tutorials/saasbo.ipynb +++ b/tutorials/saasbo/saasbo.ipynb @@ -15,6 +15,19 @@ "[1] D. Eriksson, M. Jankowiak. High-Dimensional Bayesian Optimization with Sparse Axis-Aligned Subspaces. Proceedings of the Thirty-Seventh Conference on Uncertainty in Artificial Intelligence, 2021." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tutorials/saasbo_nehvi.ipynb b/tutorials/saasbo_nehvi/saasbo_nehvi.ipynb similarity index 98% rename from tutorials/saasbo_nehvi.ipynb rename to tutorials/saasbo_nehvi/saasbo_nehvi.ipynb index d49edb5f7b3..6de7d7ae942 100644 --- a/tutorials/saasbo_nehvi.ipynb +++ b/tutorials/saasbo_nehvi/saasbo_nehvi.ipynb @@ -117,6 +117,18 @@ "- To learn about multi-objective optimization in Ax Service API: https://ax.dev/tutorials/multiobjective_optimization.html#Using-the-Service-API." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -178,7 +190,10 @@ "metadata": {}, "outputs": [], "source": [ - "init_notebook_plotting()" + "import plotly.io as pio\n", + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/scheduler.ipynb b/tutorials/scheduler/scheduler.ipynb similarity index 99% rename from tutorials/scheduler.ipynb rename to tutorials/scheduler/scheduler.ipynb index 6ca9dc5cae1..650bd3ca127 100644 --- a/tutorials/scheduler.ipynb +++ b/tutorials/scheduler/scheduler.ipynb @@ -76,6 +76,18 @@ "An example of an 'external system' running trial evaluations could be a remote server executing scheduled jobs, a subprocess conducting ML training runs, an engine running physics simulations, etc. 
For the sake of example here, let us assume a dummy external system with the following client:" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -438,8 +450,11 @@ "import numpy as np\n", "from ax.plot.trace import optimization_trace_single_method\n", "from ax.utils.notebook.plotting import render, init_notebook_plotting\n", + "import plotly.io as pio\n", "\n", "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"\n", "\n", "\n", "def get_plot():\n", diff --git a/tutorials/sebo.ipynb b/tutorials/sebo/sebo.ipynb similarity index 98% rename from tutorials/sebo.ipynb rename to tutorials/sebo/sebo.ipynb index 730ead6c532..d6abeba2d4d 100644 --- a/tutorials/sebo.ipynb +++ b/tutorials/sebo/sebo.ipynb @@ -24,6 +24,19 @@ "By following this tutorial, you will learn how to leverage the SEBO method through the Ax API, empowering you to effectively balance objectives and sparsity in your optimization tasks. Let's get started!" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/tutorials/submitit.ipynb b/tutorials/submitit/submitit.ipynb similarity index 99% rename from tutorials/submitit.ipynb rename to tutorials/submitit/submitit.ipynb index 155f1c57cd5..d671326c11c 100644 --- a/tutorials/submitit.ipynb +++ b/tutorials/submitit/submitit.ipynb @@ -16,6 +16,19 @@ "Let's start by importing the necessary libraries." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "import plotly.io as pio\n", + "if 'google.colab' in sys.modules:\n", + " pio.renderers.default = \"colab\"\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -7589,21 +7602,21 @@ "metadata": { "fileHeader": "", "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" } }, "nbformat": 4, diff --git a/tutorials/tune_cnn_service.ipynb b/tutorials/tune_cnn_service/tune_cnn_service.ipynb similarity index 98% rename from tutorials/tune_cnn_service.ipynb rename to tutorials/tune_cnn_service/tune_cnn_service.ipynb index f2bcc0d90da..eecbf4e09c0 100644 --- a/tutorials/tune_cnn_service.ipynb +++ b/tutorials/tune_cnn_service/tune_cnn_service.ipynb @@ -14,6 +14,18 @@ "This tutorial walks through using Ax to tune two hyperparameters (learning rate and momentum) for a PyTorch CNN on the MNIST dataset trained using SGD with momentum." 
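The dummy client that the tutorial goes on to define sits outside the hunks shown in this diff. Purely as a hedged illustration of the kind of interface such an external-system client tends to expose — the class and method names below are assumptions, not the tutorial's actual client:

```python
from typing import Any, Dict


class IllustrativeJobClient:
    """Hypothetical stand-in for an external job-execution system."""

    def __init__(self) -> None:
        self._jobs: Dict[int, Dict[str, Any]] = {}
        self._next_id = 0

    def schedule_job(self, parameters: Dict[str, Any]) -> int:
        # A real client would submit work to a remote service and return its handle.
        job_id = self._next_id
        self._next_id += 1
        self._jobs[job_id] = {"parameters": parameters, "status": "RUNNING"}
        return job_id

    def get_job_status(self, job_id: int) -> str:
        # A real client would poll the external system; this stub completes instantly.
        self._jobs[job_id]["status"] = "COMPLETED"
        return self._jobs[job_id]["status"]
```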
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -39,8 +51,11 @@ "from ax.utils.tutorials.cnn_utils import evaluate, load_mnist, train\n", "from torch._tensor import Tensor\n", "from torch.utils.data import DataLoader\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { diff --git a/tutorials/visualizations.ipynb b/tutorials/visualizations/visualizations.ipynb similarity index 96% rename from tutorials/visualizations.ipynb rename to tutorials/visualizations/visualizations.ipynb index 4603bcbb30e..80d23f712ef 100644 --- a/tutorials/visualizations.ipynb +++ b/tutorials/visualizations/visualizations.ipynb @@ -11,6 +11,18 @@ "This tutorial illustrates the core visualization utilities available in Ax." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "in_colab = 'google.colab' in sys.modules\n", + "if in_colab:\n", + " %pip install ax-platform" + ] + }, { "cell_type": "code", "execution_count": null, @@ -34,8 +46,11 @@ "from ax.service.ax_client import AxClient, ObjectiveProperties\n", "from ax.utils.measurement.synthetic_functions import hartmann6\n", "from ax.utils.notebook.plotting import init_notebook_plotting, render\n", + "import plotly.io as pio\n", "\n", - "init_notebook_plotting()" + "init_notebook_plotting()\n", + "if in_colab:\n", + " pio.renderers.default = \"colab\"" ] }, { @@ -386,8 +401,8 @@ }, "indentAmount": 2, "kernelspec": { - "name": "python3", - "display_name": "python3" + "display_name": "python3", + "name": "python3" } }, "nbformat": 4, diff --git a/website/README.md b/website/README.md new file mode 100644 index 00000000000..2961e06b2cd --- /dev/null +++ b/website/README.md @@ -0,0 +1,52 @@ +The Ax website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator. + +## Building + +### Notebooks +We convert tutorial notebooks to MDX for embedding as docs. This needs to be done before serving the website and can be done by running this script from the project root: + +```bash +python3 scripts/convert_ipynb_to_mdx.py --clean +``` + +If the script fails ensure you have the necessary dependencies (ideally to your virtual env): + +```bash +pip install -e ".[tutorial]" +``` + +### Docusaurus +You need [Node](https://nodejs.org/en/) >= 18.x and +[Yarn](https://yarnpkg.com/en/) in order to build the Ax website. + +Switch to the `website` dir from the project root and start the server: +```bash +cd website +yarn +yarn start +``` + +Open http://localhost:3000 (if doesn't automatically open). + +Anytime you change the contents of the page, the page should auto-update. + +Note that you may need to switch to the "Next" version of the website documentation to see your latest changes. + +### All-in-one + +For convenience we provide a single shell script to convert the tutorials and build the website in one command: +```bash +./scripts/make_docs.sh +``` + +To generate a static build of the website in the `website/build` directory, run +```bash +./scripts/make_docs.sh -b +``` + +## Publishing + +The site is hosted on GitHub pages, via the `gh-pages` branch of the Ax +[GitHub repo](https://github.com/facebook/Ax/tree/gh-pages). 
+The website is automatically built and published from GitHub Actions - see the +[config file](https://github.com/facebook/Ax/blob/main/.github/workflows/publish_website.yml) for details. diff --git a/website/core/Footer.js b/website/core/Footer.js deleted file mode 100644 index f1c1792b09f..00000000000 --- a/website/core/Footer.js +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -const React = require('react'); - -class Footer extends React.Component { - - docUrl(doc, language) { - const baseUrl = this.props.config.baseUrl; - const docsUrl = this.props.config.docsUrl; - const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`; - const langPart = `${language ? `${language}/` : ''}`; - return `${baseUrl}${docsPart}${langPart}${doc}`; - } - - pageUrl(doc, language) { - const baseUrl = this.props.config.baseUrl; - return baseUrl + (language ? `${language}/` : '') + doc; - } - - render() { - const currentYear = new Date().getFullYear(); - - return ( - - ); - } -} - -module.exports = Footer; diff --git a/website/core/Tutorial.js b/website/core/Tutorial.js deleted file mode 100644 index a844f68a232..00000000000 --- a/website/core/Tutorial.js +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - * - * @format - */ - -const React = require('react'); - -const fs = require('fs-extra'); -const path = require('path'); -const CWD = process.cwd(); - -const CompLibrary = require(`${CWD}/node_modules/docusaurus/lib/core/CompLibrary.js`); -const Container = CompLibrary.Container; - -const TutorialSidebar = require(`${CWD}/core/TutorialSidebar.js`); - -function timeToMinAndSec(time) { - const mins = Math.floor(time / 60); - const total_secs = time - mins * 60; - const secs = Math.round(total_secs * 100) / 100; - return {mins, secs}; -} - -function renderDownloadIcon() { - return ( - - ); -} - -class Tutorial extends React.Component { - render() { - const {baseUrl, tutorialDir, tutorialID, totalExecTime} = this.props; - - let htmlFile = null; - let pyFile = null; - let ipynbFile = null; - let directoryDownloadButton = null; - let totalExecTimeText = null; - - if (tutorialDir != null && tutorialDir !== '') { - htmlFile = `${CWD}/_tutorials/${tutorialDir}/${tutorialID}.html`; - ipynbFile = `${baseUrl}files/${tutorialDir}/${tutorialID}.ipynb`; - pyFile = `${baseUrl}files/${tutorialDir}/${tutorialID}.py`; - directoryDownloadButton = ( - - ); - } else { - htmlFile = `${CWD}/_tutorials/${tutorialID}.html`; - ipynbFile = `${baseUrl}files/${tutorialID}.ipynb`; - pyFile = `${baseUrl}files/${tutorialDir}/${tutorialID}.py`; - } - const normalizedHtmlFile = path.normalize(htmlFile); - - if (totalExecTime != null) { - const minsAndSecs = timeToMinAndSec(totalExecTime); - const timeText = - 'Total runtime of script: ' + - (minsAndSecs['mins'] === 0 - ? `${minsAndSecs['secs']} seconds.` - : `${minsAndSecs['mins']} minutes, ${minsAndSecs['secs']} seconds.`); - - totalExecTimeText = ( -
-        {timeText}
-      );
-    }
-
-    return (
- - - - ); - } -} - -module.exports = Tutorial; diff --git a/website/core/TutorialSidebar.js b/website/core/TutorialSidebar.js deleted file mode 100644 index 3ab142f22f7..00000000000 --- a/website/core/TutorialSidebar.js +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - * - * @format - */ - -const React = require('react'); -const fs = require('fs-extra'); -const path = require('path'); -const join = path.join; -const CWD = process.cwd(); - -const CompLibrary = require(join( - CWD, - '/node_modules/docusaurus/lib/core/CompLibrary.js', -)); -const SideNav = require(join( - CWD, - '/node_modules/docusaurus/lib/core/nav/SideNav.js', -)); - -const Container = CompLibrary.Container; - -const OVERVIEW_ID = 'tutorial_overview'; - -class TutorialSidebar extends React.Component { - render() { - const {currentTutorialID} = this.props; - const current = { - id: currentTutorialID || OVERVIEW_ID, - }; - - const toc = [ - { - type: 'CATEGORY', - title: 'Tutorials', - children: [ - { - type: 'LINK', - item: { - permalink: 'tutorials/', - id: OVERVIEW_ID, - title: 'Overview', - }, - }, - ], - }, - ]; - - const jsonFile = join(CWD, 'tutorials.json'); - const normJsonFile = path.normalize(jsonFile); - const json = JSON.parse(fs.readFileSync(normJsonFile, {encoding: 'utf8'})); - - Object.keys(json).forEach(category => { - const categoryItems = json[category]; - const items = []; - categoryItems.map(item => { - let permalink = `tutorials/${item.id}.html`; - if ('dir' in item) { - permalink = `tutorials/${item.dir}/${item.id}.html`; - } - items.push({ - type: 'LINK', - item: { - permalink: permalink, - id: item.id, - title: item.title, - }, - }); - }); - - toc.push({ - type: 'CATEGORY', - title: category, - children: items, - }); - }); - - return ( - - - - ); - } -} - -module.exports = TutorialSidebar; diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js new file mode 100644 index 00000000000..fc26e7e00e4 --- /dev/null +++ b/website/docusaurus.config.js @@ -0,0 +1,191 @@ +import {themes as prismThemes} from 'prism-react-renderer'; +import remarkMath from 'remark-math'; +import rehypeKatex from 'rehype-katex'; + +module.exports={ + "title": "Ax", + "tagline": "Adaptive Experimentation Platform", + "url": "https://ax.dev", + "baseUrl": "/", + "organizationName": "facebook", + "projectName": "Ax", + "scripts": [ + "https://cdn.plot.ly/plotly-latest.min.js", + "/Ax/js/plotUtils.js", + "https://buttons.github.io/buttons.js", + 'https://cdn.bokeh.org/bokeh/release/bokeh-2.4.2.min.js', + 'https://cdn.bokeh.org/bokeh/release/bokeh-widgets-2.4.2.min.js', + ], + "favicon": "img/favicon.png", + "customFields": { + "users": [], + "wrapPagesHTML": true + }, + "onBrokenLinks": "log", + "onBrokenMarkdownLinks": "log", + "future": { + "experimental_faster": true, + }, + "presets": [ + [ + "@docusaurus/preset-classic", + { + "docs": { + "showLastUpdateAuthor": true, + "showLastUpdateTime": true, + "path": "../docs", + "sidebarPath": "../website/sidebars.js", + remarkPlugins: [remarkMath], + rehypePlugins: [rehypeKatex], + }, + "blog": {}, + "theme": { + "customCss": "src/css/customTheme.css" + }, + "gtag": { + "trackingID": "UA-139570076-1" + } + } + ] + ], + stylesheets: [ + { + href: 'https://cdn.jsdelivr.net/npm/katex@0.13.24/dist/katex.min.css', + type: 'text/css', + integrity: + 
'sha384-odtC+0UGzzFL/6PNoE8rX/SPcQDXBJ+uRepguP4QkPCm2LBxH3FA3y+fKSiJ+AmM', + crossorigin: 'anonymous', + }, + ], + "plugins": [ + [ + "@docusaurus/plugin-client-redirects", + { + "fromExtensions": [ + "html" + ] + } + ], + ], + "themeConfig": { + prism: { + theme: prismThemes.github, + darkTheme: prismThemes.oneDark, + }, + "navbar": { + "title": "Ax", + "hideOnScroll": true, + "logo": { + "src": "img/ax.svg", + }, + "items": [ + { + "type": "docSidebar", + "sidebarId": "docs", + "label": "Docs", + "position": "left" + }, + { + "type": "docSidebar", + "sidebarId": "tutorials", + "label": "Tutorials", + "position": "left" + }, + { + "href": "https://ax.readthedocs.io/", + "label": "API", + "position": "left", + "target": "_blank", + }, + { + type: 'docsVersionDropdown', + position: 'right', + dropdownItemsAfter: [ + { + type: 'html', + value: '
', + }, + { + type: 'html', + className: 'margin-horiz--sm text--bold', + value: 'Archived versions', + }, + { + href: 'https://archive.ax.dev/versions.html', + label: '0.x.x', + }, + ], + }, + { + "href": "https://github.com/facebook/Ax", + "className": "header-github-link", + "aria-label": "GitHub", + "position": "right" + }, + { + to: 'blog', + label: 'Blog', + position: 'left' + } + ] + }, + "docs": { + "sidebar": { + autoCollapseCategories: true, + hideable: true, + }, + }, + "image": "img/ax.svg", + "footer": { + style: 'dark', + "logo": { + alt: "Ax", + "src": "img/meta_opensource_logo_negative.svg", + }, + links: [ + { + title: 'Docs', + items: [ + { + label: 'Introduction', + to: 'docs/why-ax', + }, + ], + }, + { + title: 'Social', + items: [ + { + label: 'GitHub', + href: 'https://github.com/facebook/ax', + } + ], + }, + { + title: 'Legal', + // Please do not remove the privacy and terms, it's a legal requirement. + items: [ + { + label: 'Privacy', + href: 'https://opensource.facebook.com/legal/privacy/', + target: '_blank', + rel: 'noreferrer noopener', + }, + { + label: 'Terms', + href: 'https://opensource.facebook.com/legal/terms/', + target: '_blank', + rel: 'noreferrer noopener', + }, + ], + }, + ], + copyright: `Copyright © ${new Date().getFullYear()} Meta Platforms, Inc.`, + }, + algolia: { + appId: 'O2Q3QH4SYH', + apiKey: '330b76ae9b20640dacf7ef3e1256f584', + indexName: 'ax', + }, + } +} diff --git a/website/package.json b/website/package.json index 7d66debef68..a145a621fb9 100644 --- a/website/package.json +++ b/website/package.json @@ -1,14 +1,33 @@ { "scripts": { "examples": "docusaurus-examples", - "start": "docusaurus-start", - "build": "docusaurus-build", + "start": "DOCUSAURUS_IGNORE_SSG_WARNINGS=true docusaurus start", + "build": "DOCUSAURUS_IGNORE_SSG_WARNINGS=true docusaurus build", "publish-gh-pages": "docusaurus-publish", "write-translations": "docusaurus-write-translations", "version": "docusaurus-version", - "rename-version": "docusaurus-rename-version" + "rename-version": "docusaurus-rename-version", + "swizzle": "docusaurus swizzle", + "deploy": "docusaurus deploy", + "serve": "docusaurus serve", + "clear": "docusaurus clear", + "docusaurus": "docusaurus" }, - "devDependencies": { - "docusaurus": "^1.7.2" + "devDependencies": {}, + "dependencies": { + "@docusaurus/core": "3.6.3", + "@docusaurus/faster": "3.6.3", + "@docusaurus/plugin-client-redirects": "3.6.3", + "@docusaurus/preset-classic": "3.6.3", + "clsx": "^1.1.1", + "plotly.js": "^2.8.1", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "react-plotly.js": "^2.5.1", + "rehype-katex": "7", + "remark-math": "6" + }, + "engines": { + "node": ">=18.0" } } diff --git a/website/pages/en/index.js b/website/pages/en/index.js deleted file mode 100644 index 723f6045f6d..00000000000 --- a/website/pages/en/index.js +++ /dev/null @@ -1,235 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- * - * @format - */ - -const React = require('react'); - -const CompLibrary = require('../../core/CompLibrary.js'); - -const MarkdownBlock = CompLibrary.MarkdownBlock; -const Container = CompLibrary.Container; -const GridBlock = CompLibrary.GridBlock; - -const bash = (...args) => `~~~bash\n${String.raw(...args)}\n~~~`; - -class HomeSplash extends React.Component { - render() { - const {siteConfig, language = ''} = this.props; - const {baseUrl, docsUrl} = siteConfig; - const docsPart = `${docsUrl ? `${docsUrl}/` : ''}`; - const langPart = `${language ? `${language}/` : ''}`; - const docUrl = doc => `${baseUrl}${docsPart}${langPart}${doc}`; - - const SplashContainer = props => ( -
-
-
{props.children}
-
-
- ); - - const Logo = props => ( -
- Project Logo -
- ); - - const ProjectTitle = () => ( -

- {siteConfig.tagline} -

- ); - - const PromoSection = props => ( -
-
-
{props.children}
-
-
- ); - - const Button = props => ( - - ); - - return ( - - -
- - - - - - -
-
- ); - } -} - -class Index extends React.Component { - render() { - const {config: siteConfig, language = ''} = this.props; - const {baseUrl} = siteConfig; - - const Block = props => ( - - - - ); - - const Description = () => ( - - {[ - { - content: - 'This is another description of how this project is useful', - image: `${baseUrl}img/docusaurus.svg`, - imageAlign: 'right', - title: 'Description', - }, - ]} - - ); - - const pre = '```'; - - const codeExample = `${pre}python ->>> from ax import optimize ->>> best_parameters, best_values, experiment, model = optimize( - parameters=[ - { - "name": "x1", - "type": "range", - "bounds": [-10.0, 10.0], - }, - { - "name": "x2", - "type": "range", - "bounds": [-10.0, 10.0], - }, - ], - # Booth function - evaluation_function=lambda p: (p["x1"] + 2*p["x2"] - 7)**2 + (2*p["x1"] + p["x2"] - 5)**2, - minimize=True, - ) - ->>> best_parameters -{'x1': 1.02, 'x2': 2.97} # true min is (1, 3) - `; - - const QuickStart = () => ( -
-

Get Started

- -
    -
  1. - Install Ax: - {bash`conda install pytorch torchvision -c pytorch # OSX only`} - {bash`pip3 install ax-platform # all systems`} -
  2. -
  3. - Run an optimization: - {codeExample} -
  4. -
-
-
- ); - - const Features = () => ( -
-

Key Features

- - {[ - { - content: - 'Easy to plug in new algorithms and use the library across ' + - 'different domains.', - image: `${baseUrl}img/th-large-solid.svg`, - imageAlign: 'top', - title: 'Modular', - }, - { - content: - 'Field experiments require a range of considerations ' + - 'beyond standard optimization problems.', - image: `${baseUrl}img/dice-solid.svg`, - imageAlign: 'top', - title: 'Supports A/B Tests', - }, - { - content: - 'Support for industry-grade experimentation ' + - 'and optimization management, including MySQL storage.', - image: `${baseUrl}img/database-solid.svg`, - imageAlign: 'top', - title: 'Production-Ready', - }, - ]} - -
- ); - - const Showcase = () => { - if ((siteConfig.users || []).length === 0) { - return null; - } - - const showcase = siteConfig.users - .filter(user => user.pinned) - .map(user => ( - - {user.caption} - - )); - - const pageUrl = page => baseUrl + (language ? `${language}/` : '') + page; - - return ( -
-

Who is Using This?

-

This project is used by all these people

-
{showcase}
- -
- ); - }; - - return ( -
- -
- - -
-
- ); - } -} - -module.exports = Index; diff --git a/website/pages/tutorials/index.js b/website/pages/tutorials/index.js deleted file mode 100644 index 263a30a972c..00000000000 --- a/website/pages/tutorials/index.js +++ /dev/null @@ -1,186 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - * - * @format - */ - -const React = require('react'); - -const CWD = process.cwd(); - -const CompLibrary = require( - `${CWD}/node_modules/docusaurus/lib/core/CompLibrary.js`, -); -const Container = CompLibrary.Container; - -const TutorialSidebar = require(`${CWD}/core/TutorialSidebar.js`); - -class TutorialHome extends React.Component { - render() { - return ( -
- - -
-
-

Welcome to Ax Tutorials

-
-

- Here you can learn about the structure and applications of Ax from - examples. -

-

- Our 3 API tutorials:  - Loop,  - Service, and  - Developer — are a - good place to start. Each tutorial showcases optimization on a - constrained Hartmann6 problem, with the Loop API being the - simplest to use and the Developer API being the most customizable. -

-

- - NOTE: We recommend the - Service API for the - vast majority of use cases. - - This API provides an ideal balance of flexibility and simplicity - for most users, and we are in the process of consolidating Ax - usage around it more formally. -

-

- - Further, we explore the different components available in Ax in - more detail. - {' '} - The components explored below serve to set up an experiment, - visualize its results, configure an optimization algorithm, run an - entire experiment in a managed closed loop, and combine BoTorch - components in Ax in a modular way. -

-
    -
  • - Visualizations  - illustrates the different plots available to view and understand - your results. -
  • -
-
    -
  • - GenerationStrategy  steps through setting up a way to specify the optimization - algorithm (or multiple). A GenerationStrategy - is an important component of the Service API and the - Scheduler. 
  • -
-
    -
  • - Scheduler  demonstrates an - example of a managed and configurable closed-loop optimization, - conducted in an asynchronous fashion. Scheduler is a - manager abstraction in Ax that deploys trials, polls them, and - uses their results to produce more trials. 
  • -
-
    -
  • - - Modular BoTorchModel - -   walks through a new beta-feature — an improved - interface between Ax and{' '} - BoTorch — which allows - for combining arbitrary BoTorch components like - AcquisitionFunction, Model, - AcquisitionObjective etc. into a single{' '} - Model in Ax. 
  • -
-

- Our other Bayesian Optimization tutorials include: -

- - -
    -
  • - Multi-Task Modeling -   illustrates multi-task Bayesian Optimization on a - constrained synthetic Hartmann6 problem. -
  • -
- - - - {/*
    -
  • - Benchmarking Suite -   demonstrates how to use the Ax benchmarking suite to - compare Bayesian Optimization algorithm performances and - generate a comparative report with visualizations. -
  • -
*/} -

- For experiments done in a real-life setting, refer to our field - experiments tutorials: -

-
    -
  • - Bandit Optimization -   shows how Thompson Sampling can be used to intelligently - reallocate resources to well-performing configurations in - real-time. -
  • -
- -
-
-
- ); - } -} - -module.exports = TutorialHome; diff --git a/website/sidebars.js b/website/sidebars.js new file mode 100644 index 00000000000..59391b7c014 --- /dev/null +++ b/website/sidebars.js @@ -0,0 +1,52 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + * + * @format + */ + +const tutorials = () => { + const allTutorialMetadata = require('./tutorials.json'); + const tutorialsSidebar = [{ + type: 'category', + label: 'Tutorials', + collapsed: false, + items: [ + { + type: 'doc', + id: 'tutorials/index', + label: 'Overview', + }, + ], + },]; + for (var category in allTutorialMetadata) { + const categoryItems = allTutorialMetadata[category]; + const items = []; + categoryItems.map(item => { + items.push({ + type: 'doc', + label: item.title, + id: `tutorials/${item.id}/index`, + }); + }); + + tutorialsSidebar.push({ + type: 'category', + label: category, + items: items, + }); + } + return tutorialsSidebar; +}; + +export default { + docs: { + "Introduction": ["why-ax"], + "Getting Started": ["installation", "api", "glossary"], + "Algorithms": ["bayesopt", "banditopt"], + "Components": ["core", "trial-evaluation", "data", "models", "storage"], + }, + tutorials: tutorials(), +}; diff --git a/website/sidebars.json b/website/sidebars.json deleted file mode 100644 index d83373ff632..00000000000 --- a/website/sidebars.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "docs": { - "Introduction": ["why-ax"], - "Getting Started": ["installation", "api", "glossary"], - "Algorithms": ["bayesopt", "banditopt"], - "Components": ["core", "trial-evaluation", "data", "models", "storage"] - } -} diff --git a/website/siteConfig.js b/website/siteConfig.js deleted file mode 100644 index ddc63117cc8..00000000000 --- a/website/siteConfig.js +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - * - * @format - */ - -// See https://docusaurus.io/docs/site-config for all the possible -// site configuration options. - -const baseUrl = '/'; - -// If true, include Algolia search bar when building site -// Note: this setting is toggled to false by publish_site.sh script, so -// it should not be renamed without modifying that script. -const includeAlgolia = true; - -// List of projects/orgs using your project for the users page. 
-const users = []; - -const siteConfig = { - title: 'Ax', - tagline: 'Adaptive Experimentation Platform', - url: 'https://ax.dev/', - baseUrl: baseUrl, - - // Used for publishing and more - projectName: 'Ax', - organizationName: 'facebook', - - // Google analytics - gaTrackingId: 'UA-139570076-1', - - // For no header links in the top nav bar -> headerLinks: [], - headerLinks: [ - {doc: 'why-ax', label: 'Docs'}, - {href: `${baseUrl}tutorials/`, label: 'Tutorials'}, - {href: `${baseUrl}api/`, label: 'API'}, - // Search can be enabled when site is online - // {search: true}, - {href: 'https://github.com/facebook/Ax', label: 'GitHub'}, - ], - - // If you have users set above, you add it here: - users, - - /* path to images for header/footer */ - headerIcon: 'img/ax_lockup_white.svg', - footerIcon: 'img/ax.svg', - favicon: 'img/favicon.png', - - /* Colors for website */ - colors: { - primaryColor: '#1F2833', - secondaryColor: '#C5C6C7', - }, - - highlight: { - // Highlight.js theme to use for syntax highlighting in code blocks. - theme: 'default', - }, - - // Custom scripts that are placed in of each page - scripts: [ - 'https://cdn.plot.ly/plotly-latest.min.js', - `${baseUrl}js/plotUtils.js`, - 'https://buttons.github.io/buttons.js', - `${baseUrl}js/mathjax.js`, - 'https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js?config=TeX-AMS_SVG', - ], - - // On page navigation for the current documentation page. - onPageNav: 'separate', - // Use .html extensions for paths. - cleanUrl: false, - - // Open Graph and Twitter card images. - ogImage: 'img/ax.svg', - twitterImage: 'img/ax.svg', - - // Show documentation's last contributor's name. - // enableUpdateBy: true, - - // Show documentation's last update time. - // enableUpdateTime: true, - - // enable scroll to top button a the bottom of the site - scrollToTop: true, - - wrapPagesHTML: true, -}; - -if (includeAlgolia == true) { - siteConfig['algolia'] = { - apiKey: '467d4f1f6cace3ecb36ab551cb44905b', - indexName: 'ax', - algoliaOptions: {}, // Optional, if provided by Algolia - }; -} - -module.exports = siteConfig; diff --git a/website/src/components/CellOutput.jsx b/website/src/components/CellOutput.jsx new file mode 100644 index 00000000000..434139c67ec --- /dev/null +++ b/website/src/components/CellOutput.jsx @@ -0,0 +1,37 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import React from 'react'; +import {v4 as uuidv4} from 'uuid'; + +const CellOutput = (props) => { + return ( +
+ Out: +
+        {props.children.split('\n').map((line) => {
+          return (
+            

+ {line} +

+ ); + })} +
+
+ ); +}; + +export default CellOutput; diff --git a/website/src/components/LinkButtons.jsx b/website/src/components/LinkButtons.jsx new file mode 100644 index 00000000000..d55f50593b2 --- /dev/null +++ b/website/src/components/LinkButtons.jsx @@ -0,0 +1,27 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import React from 'react'; +import Link from '@docusaurus/Link'; +import IconExternalLink from '@theme/Icon/ExternalLink' + +const LinkButtons = ({githubUrl, colabUrl}) => { + return ( +
+ + Open in GitHub + + + + Run in Google Colab + + +
+ ); +}; + +export default LinkButtons; diff --git a/website/src/components/Plotting.jsx b/website/src/components/Plotting.jsx new file mode 100644 index 00000000000..ef49acbe4b8 --- /dev/null +++ b/website/src/components/Plotting.jsx @@ -0,0 +1,49 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import React from 'react'; +import Loadable from 'react-loadable'; +import BrowserOnly from '@docusaurus/BrowserOnly'; +import '../css/bokeh.css'; + +export const BokehFigure = React.memo(({data}) => { + const targetId = data['target_id']; + return ( +
+ loading...
}> + {() => { + { + window.Bokeh.embed.embed_item(data, targetId); + } + }} + +
+ ); +}); + +const Plotly = Loadable({ + loader: () => import(`react-plotly.js`), + loading: ({timedOut}) => + timedOut ? ( +
Error: Loading Plotly timed out.
+ ) : ( +
loading...
+ ), + timeout: 10000, +}); + +export const PlotlyFigure = React.memo(({data}) => { + return ( +
+ +
+ ); +}); diff --git a/website/src/css/bokeh.css b/website/src/css/bokeh.css new file mode 100644 index 00000000000..5d74d372d37 --- /dev/null +++ b/website/src/css/bokeh.css @@ -0,0 +1,33 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + .bk.bm-tool-loading { + overflow: hidden; + } + .bk.bm-tool-loading:before { + position: absolute; + height: 100%; + width: 100%; + content: ''; + z-index: 1000; + background-color: rgb(255, 255, 255, 0.75); + border-color: lightgray; + background-repeat: no-repeat; + background-position: center; + background-size: auto 50%; + border-width: 1px; + cursor: wait; + } + .bk.bm-tool-loading.arcs:hover:before { + content: 'Please select a Query from the Select menu above.'; + font: x-large Arial, sans-serif; + color: black; + cursor: progress; + display: flex; + justify-content: center; + align-items: center; + } diff --git a/website/src/css/customTheme.css b/website/src/css/customTheme.css new file mode 100644 index 00000000000..44610ee4f7e --- /dev/null +++ b/website/src/css/customTheme.css @@ -0,0 +1,37 @@ +:root{ + /* --ifm-color-primary-lightest: #283442; + --ifm-color-primary-lighter: #242E3B; + --ifm-color-primary-light: #222C38; */ + --ifm-color-primary: #4D5CD4; + /* --ifm-color-primary-dark: #1C242E; + --ifm-color-primary-darker: #1A222B; + --ifm-color-primary-darkest: #161C24; */ +} + +.hero { + background: linear-gradient( + 0deg, + rgba(2, 0, 36, 1), + rgba(91, 24, 97, 1) 0%, + rgba(72, 114, 249, 1) 100% + ); +} + +.hero .button.button--secondary.button--outline:not(.button--active):not(:hover) { + color: var(--ifm-color-secondary); +} + +.header-github-link::before { + content: ''; + width: 24px; + height: 24px; + display: flex; + background-color: var(--ifm-navbar-link-color); + mask-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E"); + transition: background-color var(--ifm-transition-fast) + var(--ifm-transition-timing-default); +} + +.header-github-link:hover::before { + background-color: var(--ifm-navbar-link-hover-color); +} diff --git a/website/src/pages/index.js b/website/src/pages/index.js new file mode 100644 index 00000000000..6052e3b17b2 --- /dev/null +++ b/website/src/pages/index.js @@ -0,0 +1,169 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ * + * @format + */ + +import React from 'react'; +import Link from '@docusaurus/Link'; +import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; +import useBaseUrl from '@docusaurus/useBaseUrl'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; +import Layout from "@theme/Layout"; + +const features = [ + { + content: + 'Easy to plug in new algorithms and use the library across ' + + 'different domains.', + image: 'img/th-large-solid.svg', + title: 'Modular', + }, + { + content: + 'Field experiments require a range of considerations ' + + 'beyond standard optimization problems.', + image: 'img/dice-solid.svg', + title: 'Supports A/B Tests', + }, + { + content: + 'Support for industry-grade experimentation ' + + 'and optimization management, including MySQL storage.', + image: 'img/database-solid.svg', + title: 'Production-Ready', + }, +]; + +const Feature = ({imageUrl, title, content, image}) => { + const imgUrl = useBaseUrl(imageUrl); + return ( +
+ {imgUrl && ( +
+ {title} +
+ )} + {image && ( +
+ {title} +
+ )} +

{title}

+

{content}

+
+ ); +} + +const codeExample = `>>> from ax import optimize +>>> best_parameters, best_values, experiment, model = optimize( + parameters=[ + { + "name": "x1", + "type": "range", + "bounds": [-10.0, 10.0], + }, + { + "name": "x2", + "type": "range", + "bounds": [-10.0, 10.0], + }, + ], + # Booth function + evaluation_function=lambda p: (p["x1"] + 2*p["x2"] - 7)**2 + (2*p["x1"] + p["x2"] - 5)**2, + minimize=True, + ) + +>>> best_parameters +{'x1': 1.02, 'x2': 2.97} # true min is (1, 3)`; + +const QuickStart = () => ( +
+

Get Started

+
    +
  1. + Install Ax: + + + {`pip3 install ax-platform`} + + + {`conda install pytorch torchvision -c pytorch +pip3 install ax-platform`} + + +
  2. +
  3. + Run an optimization: +

    + {codeExample} +
  4. +
+
+); + +const MyPage = () => { + const {siteConfig} = useDocusaurusContext(); + return ( + +
+
+
+ Project Logo +

{siteConfig.tagline}

+
+
+ + Why Ax? + + + Get started + + + Tutorials + +
+
+
+
+

Key Features

+ {features && features.length > 0 && ( +
+ {features.map(({title, imageUrl, content, image}) => ( + + ))} +
+ )} +
+ +
+ ); +}; + +export default MyPage; diff --git a/website/static/css/base_sphinx.css b/website/static/css/base_sphinx.css deleted file mode 100644 index b8dbd02b959..00000000000 --- a/website/static/css/base_sphinx.css +++ /dev/null @@ -1,676 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - 
-div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; 
-} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - 
-span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} diff --git a/website/static/css/custom.css b/website/static/css/custom.css deleted file mode 100644 index dceb1f5539c..00000000000 --- a/website/static/css/custom.css +++ /dev/null @@ -1,315 +0,0 @@ -/** - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - -@import url('https://fonts.googleapis.com/css?family=Montserrat'); -@import url('https://fonts.googleapis.com/css?family=IBM+Plex+Mono'); - -html body { - font-family: 'Montserrat', sans-serif; - overflow-x: hidden; -} - -.fixedHeaderContainer { - background-color: #222222; -} - -.fixedHeaderContainer header .headerTitleWithLogo { - display: none; -} - -.fixedHeaderContainer header a:nth-child(2) { - position: absolute; - right: 0px; -} - -.fixedHeaderContainer header a:nth-child(2) h3 { - font-size: 14px; -} - -.fixedHeaderContainer header a:nth-child(2) h3::before { - content: 'v: '; -} - -.navigationSlider { - margin-right: 80px; -} - -.navigationSlider .slidingNav ul { - background: #222222; -} - -.navigationSlider .slidingNav ul li a { - color: #c7d4fd; -} - -.navigationSlider .slidingNav ul li a:hover, -.navigationSlider .slidingNav ul li a:focus { - color: #ffffff; - background-color: inherit; -} - -.navigationSlider .slidingNav ul li.siteNavItemActive > a, -.navigationSlider .slidingNav ul li.siteNavGroupActive > a { - background-color: inherit; -} - -.homeContainer { - background: rgb(2, 0, 36); - background: linear-gradient( - 0deg, - rgba(2, 0, 36, 1), - rgba(91, 24, 97, 1) 0%, - rgba(72, 114, 249, 1) 100% - ); - padding: 50px 0px; -} - -.splashLogo { - display: block; - margin: 0 auto; - height: 200px; - width: 200px; -} - -.projectTitle { - color: #ffffff; - font-variant: small-caps; - font-weight: 300; -} - -.promoSection .button { - border: 1px solid #fff; - color: #ffffff; -} - -.promoSection .button:hover { - background: inherit; - border: 1px solid #ffffff; - color: #ffffff; -} - -.landingPage { - padding: 0px; -} - -div.productShowcaseSection { - color: #6c6c6c; - padding-top: 40px; -} - -.productShowcaseSection > h2 { - font-variant: small-caps; - font-weight: 360; - margin: 0px; - padding: 0px; - color: #5b1861; -} - -.productShowcaseSection p { - font-weight: 360; -} - -.productShowcaseSection div.container { - padding: 40px 0px; -} - -.productShowcaseSection div.blockImage { - height: 80px; -} - -.productShowcaseSection li { - padding: 10px 0; -} - -.productShowcaseSection pre { - margin: 10px 0; -} - -.productShowcaseSection code { - background: #fff; -} - -.container .wrapper .alignCenter h2 { - color: #222222; -} - -div#quickstart { - background: #efefef; -} - -div#quickstart ol { - margin-bottom: 0px; -} - -.nav-footer { - background-color: #222222; -} - -.nav-footer .sitemap a { - color: #c7d4fd; -} - -.nav-footer .sitemap a:hover { - color: #ffffff; -} - -a, -p a { - color: #4872f9; -} - -a:hover, -p a:hover { - color: #4872f9; -} - -/* Style docs */ -.toc .toggleNav .navGroup .navGroupCategoryTitle { - color: 
#222222; -} - -.toc .toggleNav ul li a { - color: #6c6c6c; -} - -.toc .toggleNav ul li a:hover { - color: #5b1861; -} - -.toc .toggleNav .navGroup .navListItemActive a { - color: #4872f9; -} - -.mainContainer .wrapper .post .postHeaderTitle { - color: #222222; -} - -.mainContainer .wrapper .post h1, -.mainContainer .wrapper .post h2, -.mainContainer .wrapper .post h3 { - color: #222222; -} - -.mainContainer .wrapper .post { - color: #6c6c6c; -} - -.mainContainer .wrapper .post strong { - color: #222222; -} - -a.docs-next, -a.docs-prev { - color: #4872f9; - border: 1px solid #4872f9; -} - -a.docs-next:hover, -a.docs-prev:hover { - background-color: #4872f9; - border: 1px solid #4872f9; - color: #fff; -} - -/* Style tutorials */ -.tutorialBody { - margin-top: -20px; - color: #6c6c6c; -} - -.tutorialBody h1 { - margin: 0px; -} - -.tutorialBody h1, -.tutorialBody h2, -.tutorialBody h3 { - color: #222222; -} - -.tutorialBody pre { - font-family: 'IBM Plex Mono', monospace; - font-size: 14px; - margin: 0px; -} - -.tutorialBody .input_prompt, -.tutorialBody .output_prompt { - color: darkred; - font-size: 12px; -} - -.tutorialBody .highlight { - background: #f3f4f7; - padding: 10px 20px; - border: lightgray 1px solid; - border-radius: 3px; -} - -.tutorialBody .cell { - margin: 20px; -} - -.tutorialBody .output_stderr { - background-color: #fdede9; -} - -.tutorialBody .anchor-link { - color: lightgray; -} - -.tutorialBody iframe { - width: 100%; - height: 100vh; -} - -.tutorialButtonWrapper, -.tutorialRuntime { - margin: 20px; -} - -.tutorialButton { - color: #4872f9; - border: 1px solid #4872f9; -} - -.tutorialButton svg { - height: 15px; - margin-right: 5px; -} - -.tutorialButton:hover { - color: #4872f9; - background-color: inherit; -} - -.wrapper { - max-width: 1400px; -} - -@media only screen and (min-width: 1024px) { -} - -@media only screen and (max-width: 1023px) { - .fixedHeaderContainer header a:nth-child(2) { - position: absolute; - right: 200px; - } -} - -@media only screen and (min-device-width: 360px) and (max-device-width: 736px) { - .fixedHeaderContainer header a:nth-child(2) { - position: absolute; - right: 150px; - } -} - -@media only screen and (min-width: 1400px) { -} - -@media only screen and (min-width: 1500px) { -} diff --git a/website/static/css/custom_sphinx.css b/website/static/css/custom_sphinx.css deleted file mode 100644 index 97069b1004e..00000000000 --- a/website/static/css/custom_sphinx.css +++ /dev/null @@ -1,816 +0,0 @@ -/* -Custom styling for Sphinx, based on Alabaster theme. - -Alabaster theme forked from https://github.com/bitprophet/alabaster/. See -original license below. - -Copyright (c) 2019 Jeff Forcier. - -Based on original work copyright (c) 2011 Kenneth Reitz and copyright (c) 2010 -Armin Ronacher. - -Some rights reserved. - -Redistribution and use in source and binary forms of the theme, with or -without modification, are permitted provided that the following conditions -are met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -* The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. 
- -THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. -*/ - -@import url('base_sphinx.css'); - -/* -- page layout ----------------------------------------------------------- */ - -div.sphinx div.document { - margin: 30px auto; - max-width: 1400px; - padding: 0px 20px; - width: 100%; -} - -div.sphinx div.documentwrapper { - float: left; - width: 100%; -} - -div.sphinx div.bodywrapper { - margin: 0 0 0 220px; -} - -div.sphinx div.sphinxsidebar { - width: 220px; - font-size: 14px; - line-height: 1.5; -} - -div.sphinx hr { - border: 1px solid #b1b4b6; -} - -div.sphinx div.body { - background-color: #fff; - color: #3e4349; - max-width: none; - padding: 0 60px 0 60px; -} - -div.sphinx div.body > .section { - text-align: left; -} - -div.sphinx p.caption { - font-family: inherit; - font-size: inherit; -} - -div.sphinx div.relations { - display: none; -} - -div.sphinx div.sphinxsidebar a { - color: #444; - text-decoration: none; - border-bottom: 1px dotted #999; -} - -div.sphinx div.sphinxsidebar a:hover { - border-bottom: 1px solid #999; -} - -div.sphinx div.sphinxsidebarwrapper { - padding: 18px 10px; -} - -div.sphinx div.sphinxsidebarwrapper p.logo { - padding: 0; - margin: -10px 0 0 0px; - text-align: center; -} - -div.sphinx div.sphinxsidebarwrapper h1.logo { - margin-top: -10px; - text-align: center; - margin-bottom: 5px; - text-align: left; -} - -div.sphinx div.sphinxsidebarwrapper h1.logo-name { - margin-top: 0px; -} - -div.sphinx div.sphinxsidebarwrapper p.blurb { - margin-top: 0; - font-style: normal; -} - -div.sphinx div.sphinxsidebar h3, -div.sphinx div.sphinxsidebar h4 { - color: #444; - font-size: 24px; - font-weight: normal; - margin: 0 0 5px 0; - padding: 0; -} - -div.sphinx div.sphinxsidebar h4 { - font-size: 20px; -} - -div.sphinx div.sphinxsidebar h3 a { - color: #444; -} - -div.sphinx div.sphinxsidebar p.logo a, -div.sphinx div.sphinxsidebar h3 a, -div.sphinx div.sphinxsidebar p.logo a:hover, -div.sphinx div.sphinxsidebar h3 a:hover { - border: none; -} - -div.sphinx div.sphinxsidebar p { - color: #555; - margin: 10px 0; -} - -div.sphinx div.sphinxsidebar ul { - margin: 10px 0; - padding: 0; - color: #000; -} - -div.sphinx div.sphinxsidebar ul li.toctree-l1 > a { - font-size: 120%; -} - -div.sphinx div.sphinxsidebar ul li.toctree-l2 { - margin-left: 25px; -} - -div.sphinx div.sphinxsidebar ul li.toctree-l2 > a { - font-size: 110%; -} - -div.sphinx div.sphinxsidebar input { - border: 1px solid #ccc; - font-size: 1em; -} - -div.sphinx div.sphinxsidebar hr { - border: none; - height: 1px; - color: #aaa; - background: #aaa; - text-align: left; - margin-left: 0; - width: 50%; -} - -div.sphinx div.sphinxsidebar .badge { - border-bottom: none; -} - -div.sphinx div.sphinxsidebar .badge:hover { - border-bottom: none; -} - -/* To address an issue with donation coming after search */ -div.sphinx 
div.sphinxsidebar h3.donation { - margin-top: 10px; -} - -/* -- body styles ----------------------------------------------------------- */ - -div.sphinx a { - color: #004b6b; - text-decoration: underline; -} - -div.sphinx a:hover { - color: #6d4100; - text-decoration: underline; -} - -div.sphinx div.body h1, -div.sphinx div.body h2, -div.sphinx div.body h3, -div.sphinx div.body h4, -div.sphinx div.body h5, -div.sphinx div.body h6 { - font-weight: normal; - margin: 30px 0px 10px 0px; - padding: 0; -} - -div.sphinx div.body h1 { - margin-top: 0; - padding-top: 0; - font-size: 240%; -} -div.sphinx div.body h2 { - font-size: 180%; -} -div.sphinx div.body h3 { - font-size: 150%; -} -div.sphinx div.body h4 { - font-size: 130%; -} -div.sphinx div.body h5 { - font-size: 100%; -} -div.sphinx div.body h6 { - font-size: 100%; -} - -div.sphinx a.headerlink { - color: #ddd; - padding: 0 4px; - text-decoration: none; -} - -div.sphinx a.headerlink:hover { - color: #444; - background: #eaeaea; -} - -div.sphinx div.body p, -div.sphinx div.body dd, -div.sphinx div.body li { - line-height: 1.4em; -} - -div.sphinx div.admonition { - margin: 20px 0px; - padding: 10px 30px; - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.admonition tt.xref, -div.sphinx div.admonition code.xref, -div.sphinx div.admonition a tt { - background-color: #fbfbfb; - border-bottom: 1px solid #fafafa; -} - -div.sphinx div.admonition p.admonition-title { - font-weight: normal; - font-size: 24px; - margin: 0 0 10px 0; - padding: 0; - line-height: 1; -} - -div.sphinx div.admonition p.last { - margin-bottom: 0; -} - -div.sphinx div.highlight { - background-color: #fff; -} - -div.sphinx dt:target, -.highlight { - background: #faf3e8; -} - -div.sphinx div.warning { - background-color: #fcc; - border: 1px solid #faa; -} - -div.sphinx div.danger { - background-color: #fcc; - border: 1px solid #faa; - -moz-box-shadow: 2px 2px 4px #d52c2c; - -webkit-box-shadow: 2px 2px 4px #d52c2c; - box-shadow: 2px 2px 4px #d52c2c; -} - -div.sphinx div.error { - background-color: #fcc; - border: 1px solid #faa; - -moz-box-shadow: 2px 2px 4px #d52c2c; - -webkit-box-shadow: 2px 2px 4px #d52c2c; - box-shadow: 2px 2px 4px #d52c2c; -} - -div.sphinx div.caution { - background-color: #fcc; - border: 1px solid #faa; -} - -div.sphinx div.attention { - background-color: #fcc; - border: 1px solid #faa; -} - -div.sphinx div.important { - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.tip { - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.hint { - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.seealso { - background-color: #eee; - border: 1px solid #ccc; -} - -div.sphinx div.topic { - background-color: #eee; -} - -div.sphinx p.admonition-title { - display: inline; -} - -div.sphinx p.admonition-title:after { - content: ':'; -} - -/* Code styling */ -div.sphinx pre, -div.sphinx tt, -div.sphinx code { - font-family: SFMono-Regular, Menlo, Monaco, Consolas, 'Liberation Mono', - 'Courier New', monospace; - font-size: 0.9em; - background-color: #f5f5f5; -} - -div.sphinx .hll { - background-color: #ffc; - margin: 0 -12px; - padding: 0 12px; - display: block; -} - -div.sphinx tt.descname, -div.sphinx tt.descclassname, -div.sphinx code.descname, -div.sphinx code.descclassname { - font-size: 0.9em; -} - -div.sphinx tt.descname, -div.sphinx code.descname { - font-weight: bold; - padding-right: 0.08em; - 
padding-left: 0px; -} - -div.sphinx tt.descclassname, -div.sphinx code.descclassname { - background-color: transparent; - padding-right: 0px; -} - -/* Screenshot */ -div.sphinx img.screenshot { - -moz-box-shadow: 2px 2px 4px #eee; - -webkit-box-shadow: 2px 2px 4px #eee; - box-shadow: 2px 2px 4px #eee; -} - -/* Table */ -div.sphinx table { - overflow: hidden; -} - -div.sphinx table tr { - border-top: none; -} - -div.sphinx table .field-even { - background-color: transparent; -} - -div.sphinx table.docutils { - border: 1px solid #888; - -moz-box-shadow: 2px 2px 4px #eee; - -webkit-box-shadow: 2px 2px 4px #eee; - box-shadow: 2px 2px 4px #eee; -} - -div.sphinx table.docutils td, -div.sphinx table.docutils th { - border: 1px solid #888; - padding: 0.25em 0.7em; -} - -div.sphinx table.field-list, -div.sphinx table.footnote { - border: none; - -moz-box-shadow: none; - -webkit-box-shadow: none; - box-shadow: none; -} - -div.sphinx table.footnote { - margin: 15px 0; - width: 100%; - border: 1px solid #eee; - background: #fdfdfd; - font-size: 0.9em; -} - -div.sphinx table.footnote + div.sphinx table.footnote { - margin-top: -15px; - border-top: none; -} - -div.sphinx table.field-list th { - padding: 0 0.8em 0 0; -} - -div.sphinx table.field-list td { - padding: 0; -} - -div.sphinx table.field-list p { - margin-bottom: 0.8em; -} - -/* Cloned from - * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 - */ -div.sphinx .field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -div.sphinx table.footnote td.label { - width: 0.1px; - padding: 0.3em 0 0.3em 0.5em; -} - -div.sphinx table.footnote td { - padding: 0.3em 0.5em; -} - -div.sphinx dl { - margin: 15px 0 0px 0; - padding: 0; -} - -div.sphinx dl.class, -div.sphinx dl.function, -div.sphinx, -dl.exception { - margin-bottom: 50px; - padding: 0; -} - -div.sphinx dl.class > dt, -div.sphinx dl.function > dt, -div.sphinx dl.exception > dt { - background-color: #f5f5f5; - border-top: 3px solid #4872f9; - padding: 5px 0px 5px 5px; -} - -div.sphinx dl.class > dt > em:not(.property), -div.sphinx dl.exception > dt > em:not(.property), -div.sphinx dl.function > dt > em:not(.property), -div.sphinx dl.method > dt > em:not(.property), -div.sphinx dl.staticmethod > dt > em:not(.property) { - font-family: SFMono-Regular, Menlo, Monaco, Consolas, 'Liberation Mono', - 'Courier New', monospace; - font-size: 0.9em; -} - -div.sphinx dl.class > dt > em.property, -div.sphinx dl.exception > dt > em.property, -div.sphinx dl.staticmethod > dt > .property, -div.sphinx dl.classmethod > dt > .property { - color: #4872f9; - font-style: normal; - font-variant: small-caps; -} - -div.sphinx dl.method > dt, -div.sphinx dl.attribute > dt, -div.sphinx dl.staticmethod > dt, -div.sphinx dl.classmethod > dt { - background-color: #f5f5f5; - border-left: 3px solid #4872f9; - padding: 3px 0px 3px 5px; -} - -div.sphinx dl dd { - margin-left: 30px; -} - -div.sphinx blockquote { - margin: 0 0 0 30px; - padding: 0; -} - -div.sphinx ul, -div.sphinx ol { - /* Matches the 30px from the narrow-screen "li > ul" selector below */ - margin: 10px 0 10px 30px; - padding: 0; -} - -div.sphinx pre { - background: #eee; - padding: 7px 30px; - margin: 15px 0px; - line-height: 1.3em; -} - -div.sphinx span.viewcode-link { - color: #6c6c6c; - font-size: 0.8em; - margin-right: 5px; - margin-top: 2px; -} - -div.sphinx a.viewcode-back { - color: #6c6c6c; - font-size: 0.8em; -} - -div.sphinx div.highlight { - max-width: 850px; -} 
- -div.sphinx div.highlight pre { - background: #f5f5f5; - padding-left: 15px; - padding-top: 15px; -} - -div.sphinx div.viewcode-block:target { - background: #ffffe3; - margin: -1px -10px; - padding: 5px 10px; -} - -div.sphinx dl pre, -div.sphinx blockquote pre, -div.sphinx li pre { - margin-left: 0; - padding-left: 30px; -} - -div.sphinx tt, -div.sphinx code { - background-color: #f5f5f5; - color: #222; -} - -div.sphinx tt.xref, -div.sphinx code.xref, -div.sphinx a tt { - background-color: #f5f5f5; - border-bottom: 1px solid #fff; -} - -div.sphinx a.reference { - text-decoration: none; - border-bottom: 1px dotted #004b6b; -} - -div.sphinx a.reference > code { - color: #4872f9; -} - -/* Don't put an underline on images */ -div.sphinx a.image-reference, -div.sphinx a.image-reference:hover { - border-bottom: none; -} - -div.sphinx a.reference:hover { - border-bottom: 1px solid #6d4100; -} - -div.sphinx a.reference:hover > code { - color: #5b1861; -} - -div.sphinx a.footnote-reference { - text-decoration: none; - font-size: 0.7em; - vertical-align: top; - border-bottom: 1px dotted #004b6b; -} - -div.sphinx a.footnote-reference:hover { - border-bottom: 1px solid #6d4100; -} - -div.sphinx a:hover tt, -div.sphinx a:hover code { - background: #eee; -} - -@media screen and (max-width: 870px) { - div.sphinx div.sphinxsidebar { - display: none; - } - - div.sphinx div.document { - width: 100%; - } - - div.sphinx div.documentwrapper { - margin-left: 0; - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - } - - div.sphinx div.bodywrapper { - margin-top: 0; - margin-right: 0; - margin-bottom: 0; - margin-left: 0; - } - - div.sphinx ul { - margin-left: 0; - } - - div.sphinx li > ul { - /* Matches the 30px from the "ul, ol" selector above */ - margin-left: 30px; - } - - div.sphinx .document { - width: auto; - } - - div.sphinx .bodywrapper { - margin: 0; - } - - div.sphinx .github { - display: none; - } -} - -@media screen and (max-width: 875px) { - div.sphinx div.documentwrapper { - float: none; - background: #fff; - } - - div.sphinx div.sphinxsidebar { - display: block; - float: none; - width: 102.5%; - margin: 50px -30px -20px -30px; - padding: 10px 20px; - background: #333; - color: #fff; - } - - div.sphinx div.sphinxsidebar h3, - div.sphinx div.sphinxsidebar h4, - div.sphinx div.sphinxsidebar p, - div.sphinx div.sphinxsidebar h3 a { - color: #fff; - } - - div.sphinx div.sphinxsidebar a { - color: #aaa; - } - - div.sphinx div.sphinxsidebar p.logo { - display: none; - } - - div.sphinx div.document { - width: 100%; - margin: 0; - } - - div.sphinx div.bodywrapper { - margin: 0; - } - - div.sphinx .rtd_doc_footer { - display: none; - } - - div.sphinx .document { - width: auto; - } - - div.sphinx .github { - display: none; - } -} - -/* misc. */ - -div.sphinx .revsys-inline { - display: none !important; -} - -/* Make nested-list/multi-paragraph items look better in Releases changelog - * pages. Without this, docutils' magical list fuckery causes inconsistent - * formatting between different release sub-lists. 
- */
-div.sphinx div#changelog > div.section > ul > li > p:only-child {
-  margin-bottom: 0;
-}
-
-/* Hide fugly table cell borders in ..bibliography:: directive output */
-div.sphinx table.docutils.citation,
-div.sphinx table.docutils.citation td,
-div.sphinx table.docutils.citation th {
-  border: none;
-  /* Below needed in some edge cases; if not applied, bottom shadows appear */
-  -moz-box-shadow: none;
-  -webkit-box-shadow: none;
-  box-shadow: none;
-}
-
-/* relbar */
-
-div.sphinx .related {
-  line-height: 30px;
-  width: 100%;
-  font-size: 0.9rem;
-}
-
-div.sphinx .related.top {
-  border-bottom: 1px solid #eee;
-  margin-bottom: 20px;
-}
-
-div.sphinx .related.bottom {
-  border-top: 1px solid #eee;
-}
-
-div.sphinx .related ul {
-  padding: 0;
-  margin: 0;
-  list-style: none;
-}
-
-div.sphinx .related li {
-  display: inline;
-}
-
-div.sphinx nav#rellinks {
-  float: right;
-}
-
-div.sphinx nav#rellinks li + li:before {
-  content: '|';
-}
-
-div.sphinx nav#breadcrumbs li + li:before {
-  content: '\00BB';
-}
-
-/* Hide certain items when printing */
-@media print {
-  div.sphinx div.related {
-    display: none;
-  }
-}
diff --git a/website/static/css/pygments.css b/website/static/css/pygments.css
deleted file mode 100644
index 0ec5f9393cb..00000000000
--- a/website/static/css/pygments.css
+++ /dev/null
@@ -1,212 +0,0 @@
-/**
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-.highlight .hll {
-  background-color: #ffffcc;
-}
-.highlight .c {
-  color: #60a0b0;
-  font-style: italic;
-} /* Comment */
-.highlight .err {
-  border: 1px solid #ff0000;
-} /* Error */
-.highlight .k {
-  color: #007020;
-  font-weight: bold;
-} /* Keyword */
-.highlight .o {
-  color: #666666;
-} /* Operator */
-.highlight .cm {
-  color: #60a0b0;
-  font-style: italic;
-} /* Comment.Multiline */
-.highlight .cp {
-  color: #007020;
-} /* Comment.Preproc */
-.highlight .c1 {
-  color: #60a0b0;
-  font-style: italic;
-} /* Comment.Single */
-.highlight .cs {
-  color: #60a0b0;
-  background-color: #fff0f0;
-} /* Comment.Special */
-.highlight .gd {
-  color: #a00000;
-} /* Generic.Deleted */
-.highlight .ge {
-  font-style: italic;
-} /* Generic.Emph */
-.highlight .gr {
-  color: #ff0000;
-} /* Generic.Error */
-.highlight .gh {
-  color: #000080;
-  font-weight: bold;
-} /* Generic.Heading */
-.highlight .gi {
-  color: #00a000;
-} /* Generic.Inserted */
-.highlight .go {
-  color: #808080;
-} /* Generic.Output */
-.highlight .gp {
-  color: #c65d09;
-  font-weight: bold;
-} /* Generic.Prompt */
-.highlight .gs {
-  font-weight: bold;
-} /* Generic.Strong */
-.highlight .gu {
-  color: #800080;
-  font-weight: bold;
-} /* Generic.Subheading */
-.highlight .gt {
-  color: #0040d0;
-} /* Generic.Traceback */
-.highlight .kc {
-  color: #007020;
-  font-weight: bold;
-} /* Keyword.Constant */
-.highlight .kd {
-  color: #007020;
-  font-weight: bold;
-} /* Keyword.Declaration */
-.highlight .kn {
-  color: #007020;
-  font-weight: bold;
-} /* Keyword.Namespace */
-.highlight .kp {
-  color: #007020;
-} /* Keyword.Pseudo */
-.highlight .kr {
-  color: #007020;
-  font-weight: bold;
-} /* Keyword.Reserved */
-.highlight .kt {
-  color: #902000;
-} /* Keyword.Type */
-.highlight .m {
-  color: #40a070;
-} /* Literal.Number */
-.highlight .s {
-  color: #4070a0;
-} /* Literal.String */
-.highlight .na {
-  color: #4070a0;
-} /* Name.Attribute */
-.highlight .nb {
-  color: #007020;
-} /* Name.Builtin */
-.highlight .nc {
-  color: #0e84b5;
-  font-weight: bold;
-} /* Name.Class */
-.highlight .no {
-  color: #60add5;
-} /* Name.Constant */
-.highlight .nd {
-  color: #555555;
-  font-weight: bold;
-} /* Name.Decorator */
-.highlight .ni {
-  color: #d55537;
-  font-weight: bold;
-} /* Name.Entity */
-.highlight .ne {
-  color: #007020;
-} /* Name.Exception */
-.highlight .nf {
-  color: #06287e;
-} /* Name.Function */
-.highlight .nl {
-  color: #002070;
-  font-weight: bold;
-} /* Name.Label */
-.highlight .nn {
-  color: #0e84b5;
-  font-weight: bold;
-} /* Name.Namespace */
-.highlight .nt {
-  color: #062873;
-  font-weight: bold;
-} /* Name.Tag */
-.highlight .nv {
-  color: #bb60d5;
-} /* Name.Variable */
-.highlight .ow {
-  color: #007020;
-  font-weight: bold;
-} /* Operator.Word */
-.highlight .w {
-  color: #bbbbbb;
-} /* Text.Whitespace */
-.highlight .mf {
-  color: #40a070;
-} /* Literal.Number.Float */
-.highlight .mh {
-  color: #40a070;
-} /* Literal.Number.Hex */
-.highlight .mi {
-  color: #40a070;
-} /* Literal.Number.Integer */
-.highlight .mo {
-  color: #40a070;
-} /* Literal.Number.Oct */
-.highlight .sb {
-  color: #4070a0;
-} /* Literal.String.Backtick */
-.highlight .sc {
-  color: #4070a0;
-} /* Literal.String.Char */
-.highlight .sd {
-  color: #4070a0;
-  font-style: italic;
-} /* Literal.String.Doc */
-.highlight .s2 {
-  color: #4070a0;
-} /* Literal.String.Double */
-.highlight .se {
-  color: #4070a0;
-  font-weight: bold;
-} /* Literal.String.Escape */
-.highlight .sh {
-  color: #4070a0;
-} /* Literal.String.Heredoc */
-.highlight .si {
-  color: #70a0d0;
-  font-style: italic;
-} /* Literal.String.Interpol */
-.highlight .sx {
-  color: #c65d09;
-} /* Literal.String.Other */
-.highlight .sr {
-  color: #235388;
-} /* Literal.String.Regex */
-.highlight .s1 {
-  color: #4070a0;
-} /* Literal.String.Single */
-.highlight .ss {
-  color: #517918;
-} /* Literal.String.Symbol */
-.highlight .bp {
-  color: #007020;
-} /* Name.Builtin.Pseudo */
-.highlight .vc {
-  color: #bb60d5;
-} /* Name.Variable.Class */
-.highlight .vg {
-  color: #bb60d5;
-} /* Name.Variable.Global */
-.highlight .vi {
-  color: #bb60d5;
-} /* Name.Variable.Instance */
-.highlight .il {
-  color: #40a070;
-} /* Literal.Number.Integer.Long */
diff --git a/website/static/img/meta_opensource_logo_negative.svg b/website/static/img/meta_opensource_logo_negative.svg
new file mode 100644
index 00000000000..b36a423e3bf
--- /dev/null
+++ b/website/static/img/meta_opensource_logo_negative.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/website/static/js/mathjax.js b/website/static/js/mathjax.js
deleted file mode 100644
index acbbff65591..00000000000
--- a/website/static/js/mathjax.js
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Copyright (c) Meta Platforms, Inc. and affiliates.
- *
- * This source code is licensed under the MIT license found in the
- * LICENSE file in the root directory of this source tree.
- */
-
-window.MathJax = {
-  tex2jax: {
-    inlineMath: [['$', '$'], ['\\(', '\\)']],
-    displayMath: [['$$', '$$'], ['\\[', '\\]']],
-    processEscapes: true,
-    processEnvironments: true,
-  },
-  // Center justify equations in code and markdown cells. Note that this
-  // doesn't work with Plotly though, hence the !important declaratio
-  // below.
-  displayAlign: 'center',
-  'HTML-CSS': {
-    styles: {
-      '.MathJax_Display': {margin: 0, 'text-align': 'center !important'},
-    },
-    linebreaks: {automatic: true},
-  },
-};
diff --git a/website/tutorials.json b/website/tutorials.json
index 98552c1ce51..8cd882c1db0 100644
--- a/website/tutorials.json
+++ b/website/tutorials.json
@@ -61,7 +61,6 @@
       "title": "Sparsity Exploration Bayesian Optimization (SEBO)"
     },
     {
-      "dir": "early_stopping",
       "id": "early_stopping",
       "title": "Trial-Level Early Stopping"
     },
@@ -76,7 +75,6 @@
       "title": "Bandit Optimization"
     },
     {
-      "dir": "human_in_the_loop",
       "id": "human_in_the_loop",
       "title": "Human-in-the-Loop Optimization"
     }
diff --git a/website/versioned_docs/.gitkeep b/website/versioned_docs/.gitkeep
deleted file mode 100644
index 8b137891791..00000000000
--- a/website/versioned_docs/.gitkeep
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/website/versioned_sidebars/.gitkeep b/website/versioned_sidebars/.gitkeep
deleted file mode 100644
index 8b137891791..00000000000
--- a/website/versioned_sidebars/.gitkeep
+++ /dev/null
@@ -1 +0,0 @@
-