diff --git a/docs/$READTHEDOCS_OUTPUT/html/.buildinfo b/docs/$READTHEDOCS_OUTPUT/html/.buildinfo
deleted file mode 100644
index 4462e83..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 35dc572e0d396c0b6127e5d298d06ab7
-tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/index.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/index.html
deleted file mode 100644
index 3ccfcb2..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/index.html
+++ /dev/null
@@ -1,129 +0,0 @@
-Overview: module code — mamba-tabular 06.05.2024 documentation
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/classifier.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/classifier.html
deleted file mode 100644
index 38bc377..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/classifier.html
+++ /dev/null
@@ -1,508 +0,0 @@
-mambular.base_models.classifier — mamba-tabular 06.05.2024 documentation
Source code for mambular.base_models.classifier

-import torch
-import torch.nn as nn
-import torchmetrics
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseMambularClassifier(pl.LightningModule): - """ - A base class for building classification models using the Mambular architecture within the PyTorch Lightning framework. - - This class integrates various components such as embeddings for categorical and numerical features, the Mambular model - for processing sequences of embeddings, and a classification head for prediction. It supports multi-class and binary classification tasks. - - Parameters - ---------- - num_classes : int - The number of classes in the classification task. For binary classification, this should be 2. - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the Mambular model. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. - This information is used to configure embedding layers for categorical features. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to the size of their input dimensions. - This information is used to configure embedding layers for numerical features. Defaults to None. - lr : float, optional - The learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) parameter for the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - - Attributes - ---------- - embedding_activation : nn.Module - The activation function to be applied after the linear transformation of numerical features. - num_embeddings : nn.ModuleList - A list of sequential modules, each corresponding to an embedding layer for a numerical feature. - cat_embeddings : nn.ModuleList - A list of embedding layers, each corresponding to a categorical feature. - mamba : Mamba - The Mambular model for processing sequences of embeddings. - norm_f : nn.Module - A normalization layer applied after the Mambular model. - tabular_head : nn.Linear - A linear layer for predicting the class labels from the aggregated embedding representation. - pooling_method : str - The method used to aggregate embeddings across features. Supported methods are 'avg', 'max', and 'sum'. - loss_fct : nn.Module - The loss function used for training the model, configured based on the number of classes. - acc : torchmetrics.Accuracy - A metric for computing the accuracy of predictions. - auroc : torchmetrics.AUROC - A metric for computing the Area Under the Receiver Operating Characteristic curve. - precision : torchmetrics.Precision - A metric for computing the precision of predictions. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model, processing both categorical and numerical features, aggregating embeddings, - and producing predictions. - training_step(batch, batch_idx) - Performs a single training step, computing the loss and logging metrics for the training set. - validation_step(batch, batch_idx) - Performs a single validation step, computing the loss and logging metrics for the validation set. - configure_optimizers() - Configures the model's optimizers and learning rate schedulers. 
- """ - - - - - - - def __init__( - self, - num_classes, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - ): - super().__init__() - - self.config = config - self.num_classes = 1 if num_classes == 2 else num_classes - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - nn.BatchNorm1d(self.config.d_model), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, self.num_classes)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.num_classes > 2: - self.loss_fct = nn.CrossEntropyLoss() - self.acc = torchmetrics.Accuracy( - task="multiclass", num_classes=self.num_classes - ) - self.auroc = torchmetrics.AUROC( - task="multiclass", num_classes=self.num_classes - ) - self.precision = torchmetrics.Precision( - task="multiclass", num_classes=self.num_classes - ) - else: - self.loss_fct = torch.nn.BCEWithLogitsLoss() - self.acc = torchmetrics.Accuracy(task="binary") - self.auroc = torchmetrics.AUROC(task="binary") - self.precision = torchmetrics.Precision(task="binary") - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the classifier. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The output predictions of the model. - """ - batch_size = ( - cat_features[0].size(0) - if cat_features is not None - else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("train_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "train_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log AUROC - auroc = self.auroc(preds, labels.int()) - self.log( - "train_auroc", - auroc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "train_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("val_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "val_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - auroc = self.auroc(preds, labels.int()) - self.log( - "val_auroc", auroc, on_step=False, on_epoch=True, prog_bar=True, logger=True - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "val_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - )
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
-
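Before the classification head is applied, the pooling step in the forward pass above reduces the Mamba output of shape (batch, tokens, d_model) to one vector per sample. The snippet below is a minimal, self-contained sketch of that dispatch using only plain PyTorch; the function name is invented for illustration and is not a helper exported by the package, but the shapes and the "cls" convention (class token prepended at position 0) mirror the source shown above.

import torch

def pool_tokens(x: torch.Tensor, method: str = "avg") -> torch.Tensor:
    # x: (batch, tokens, d_model); token 0 is the prepended cls token
    if method == "avg":
        return torch.mean(x, dim=1)
    elif method == "max":
        return torch.max(x, dim=1).values
    elif method == "sum":
        return torch.sum(x, dim=1)
    elif method == "cls":
        return x[:, 0]
    raise ValueError(f"Invalid pooling method: {method}")

# Example: 8 samples, 5 tokens (1 cls + 4 feature tokens), d_model = 16
pooled = pool_tokens(torch.randn(8, 5, 16), method="cls")
print(pooled.shape)  # torch.Size([8, 16])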
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/distributional.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/distributional.html
deleted file mode 100644
index bbe4b64..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/distributional.html
+++ /dev/null
@@ -1,457 +0,0 @@
-mambular.base_models.distributional — mamba-tabular 06.05.2024 documentation
Source code for mambular.base_models.distributional

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-from ..utils.distributions import (
-    NormalDistribution,
-    NegativeBinomialDistribution,
-    GammaDistribution,
-    StudentTDistribution,
-    PoissonDistribution,
-    InverseGammaDistribution,
-    BetaDistribution,
-    DirichletDistribution,
-    CategoricalDistribution,
-)
-
-
-
[docs]class BaseMambularLSS(pl.LightningModule): - """ - A base module for likelihood-based statistical learning (LSS) models built on PyTorch Lightning, - integrating the Mamba architecture for tabular data. This module is designed to accommodate various - statistical distribution families for different types of regression and classification tasks. - - Parameters - ---------- - family : str - The name of the statistical distribution family to be used for modeling. Supported families include - 'normal', 'poisson', 'gamma', 'beta', 'dirichlet', 'studentt', 'negativebinom', 'inversegamma', and 'categorical'. - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the model architecture. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None. - lr : float, optional - The initial learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) coefficient. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - **distribution_params : - Additional parameters specific to the chosen statistical distribution family. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the parameters of the chosen statistical distribution. - loss_fct : callable - The loss function derived from the chosen statistical distribution. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - - - - def __init__( - self, - family, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - **distribution_params, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - distribution_classes = { - "normal": NormalDistribution, - "poisson": PoissonDistribution, - "gamma": GammaDistribution, - "beta": BetaDistribution, - "dirichlet": DirichletDistribution, - "studentt": StudentTDistribution, - "negativebinom": NegativeBinomialDistribution, - "inversegamma": InverseGammaDistribution, - "categorical": CategoricalDistribution, - } - - if family in distribution_classes: - # Pass additional distribution_params to the constructor of the distribution class - self.family = distribution_classes[family](**distribution_params) - else: - raise ValueError("Unsupported family: {}".format(family)) - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - nn.BatchNorm1d(self.config.d_model), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to #distributional param output values - mlp_layers.append(nn.Linear(input_dim, self.family.param_count)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.loss_fct = lambda predictions, y_true: self.family.compute_loss( - predictions, y_true - ) - - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - self.pooling_method = self.config.pooling_method - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning predictions based on the configured statistical distribution. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The predictions of the model, typically the parameters of the chosen statistical distribution. - """ - - batch_size = ( - cat_features[0].size(0) - if cat_features is not None - else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss using the distribution-specific loss function, - and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds, labels) - self.log( - "train_loss", - loss, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss using the distribution-specific loss function, - and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds, labels) - self.log( - "val_loss", - loss, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
-
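The LSS head above ends in a linear layer with self.family.param_count outputs, and the training loss is whatever compute_loss the chosen distribution class defines. As a rough sketch of that contract, here is what a minimal normal-distribution family could look like with torch.distributions. The class name, the softplus link for the scale, and param_count as a plain attribute are assumptions made for illustration; they are not the package's actual NormalDistribution implementation.

import torch
import torch.nn.functional as F

class ToyNormalFamily:
    # Hypothetical stand-in for a two-parameter distributional family.
    param_count = 2  # mean and scale

    def compute_loss(self, predictions, y_true):
        mean = predictions[:, 0]
        scale = F.softplus(predictions[:, 1]) + 1e-6  # keep the scale strictly positive
        dist = torch.distributions.Normal(mean, scale)
        return -dist.log_prob(y_true).mean()  # negative log-likelihood

family = ToyNormalFamily()
head_out = torch.randn(32, family.param_count)  # shape the tabular_head would emit
loss = family.compute_loss(head_out, torch.randn(32))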
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_classifier.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_classifier.html
deleted file mode 100644
index 5e1562d..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_classifier.html
+++ /dev/null
@@ -1,539 +0,0 @@
-mambular.base_models.embedding_classifier — mamba-tabular 06.05.2024 documentation
Source code for mambular.base_models.embedding_classifier

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-import torchmetrics
-
-
-
[docs]class BaseEmbeddingMambularClassifier(pl.LightningModule): - """ - A specialized classification module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. - It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable - for complex protein sequence data. - - Parameters - ---------- - config : MambularConfig - Configuration parameters for the model architecture. - cat_feature_info : dict, optional - Information about categorical features, mapping feature names to the number of unique categories. - num_feature_info : dict, optional - Information about numerical features, mapping feature names to their number of dimensions after embedding. - lr : float, optional - Learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - Number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75. - seq_size : int, optional - Size of sequence chunks for processing numerical features. Relevant when `raw_embeddings` is False. - raw_embeddings : bool, optional - Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the target. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - def __init__( - self, - num_classes, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - seq_size: int = 20, - raw_embeddings=False, - ): - super().__init__() - - self.config = config - self.num_classes = 1 if num_classes == 2 else num_classes - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - self.seq_size = seq_size - self.raw_embeddings = raw_embeddings - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - if not self.raw_embeddings: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size // self.seq_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(self.seq_size, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - else: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(1, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, self.num_classes)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - - if self.num_classes > 2: - self.loss_fct = nn.CrossEntropyLoss() - self.acc = torchmetrics.Accuracy( - task="multiclass", num_classes=self.num_classes - ) - self.auroc = torchmetrics.AUROC( - task="multiclass", num_classes=self.num_classes - ) - self.precision = torchmetrics.Precision( - task="multiclass", num_classes=self.num_classes - ) - else: - self.loss_fct = torch.nn.BCEWithLogitsLoss() - self.acc = torchmetrics.Accuracy(task="binary") - self.auroc = torchmetrics.AUROC(task="binary") - self.precision = torchmetrics.Precision(task="binary") - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning regression predictions. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features or raw sequence data, depending on `raw_embeddings`. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - # Process categorical features if present - if not self.raw_embeddings: - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [] - # Iterate through the num_embeddings, taking slices of num_features for each - for i, emb in enumerate(self.num_embeddings): - # Calculate start and end indices for slicing the list - start_idx = i * self.seq_size - end_idx = start_idx + self.seq_size - - # Slice the num_features list to get the current chunk - current_chunk = num_features[start_idx:end_idx] - - # If the current_chunk is not empty, process it - if current_chunk: - # Concatenate tensors in the current chunk along dimension 1 - chunk_tensor = torch.cat(current_chunk, dim=1) - # Apply the embedding layer to the chunk_tensor - num_embeddings.append(emb(chunk_tensor)) - - # Stack the resulting embeddings along the second dimension if num_embeddings is not empty - if num_embeddings: - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - else: - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - 
raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss, and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("train_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "train_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log AUROC - auroc = self.auroc(preds, labels.int()) - self.log( - "train_auroc", - auroc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "train_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("val_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "val_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - auroc = self.auroc(preds, labels.int()) - self.log( - "val_auroc", auroc, on_step=False, on_epoch=True, prog_bar=True, logger=True - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "val_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - )
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
-
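When raw_embeddings is False, the forward pass above slices the list of per-position numerical tensors into chunks of seq_size, concatenates each chunk along the feature dimension, and passes it through one linear embedding per chunk. The following is a minimal sketch of that chunking outside of any Lightning module, assuming each feature tensor has shape (batch, 1); the counts and dimensions are invented for the example.

import torch
import torch.nn as nn

batch, seq_size, d_model = 4, 20, 32
num_features = [torch.randn(batch, 1) for _ in range(60)]  # 60 positions -> 3 chunks

embeddings = nn.ModuleList(
    [nn.Linear(seq_size, d_model, bias=False) for _ in range(len(num_features) // seq_size)]
)

chunks = []
for i, emb in enumerate(embeddings):
    current = num_features[i * seq_size:(i + 1) * seq_size]
    chunk_tensor = torch.cat(current, dim=1)  # (batch, seq_size)
    chunks.append(emb(chunk_tensor))          # (batch, d_model)

tokens = torch.stack(chunks, dim=1)           # (batch, n_chunks, d_model)
print(tokens.shape)  # torch.Size([4, 3, 32])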
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_regressor.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_regressor.html
deleted file mode 100644
index b388834..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/embedding_regressor.html
+++ /dev/null
@@ -1,476 +0,0 @@
-mambular.base_models.embedding_regressor — mamba-tabular 06.05.2024 documentation
Source code for mambular.base_models.embedding_regressor

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseEmbeddingMambularRegressor(pl.LightningModule): - """ - A specialized regression module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. - It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable - for complex protein sequence data. - - Parameters - ---------- - config : MambularConfig - Configuration parameters for the model architecture. - cat_feature_info : dict, optional - Information about categorical features, mapping feature names to the number of unique categories. Defaults to None. - num_feature_info : dict, optional - Information about numerical features, mapping feature names to their number of dimensions after embedding. Defaults to None. - lr : float, optional - Learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - Number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75. - seq_size : int, optional - Size of sequence chunks for processing numerical features. Relevant when `raw_embeddings` is False. - raw_embeddings : bool, optional - Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the regression target. - loss_fct : nn.MSELoss - The loss function for regression tasks. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - def __init__( - self, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - seq_size: int = 20, - raw_embeddings=False, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - self.seq_size = seq_size - self.raw_embeddings = raw_embeddings - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - if not self.raw_embeddings: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size // self.seq_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(self.seq_size, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - else: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(1, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, 1)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - - self.loss_fct = nn.MSELoss() - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning regression predictions. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features or raw sequence data, depending on `raw_embeddings`. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - # Process categorical features if present - if not self.raw_embeddings: - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [] - # Iterate through the num_embeddings, taking slices of num_features for each - for i, emb in enumerate(self.num_embeddings): - # Calculate start and end indices for slicing the list - start_idx = i * self.seq_size - end_idx = start_idx + self.seq_size - - # Slice the num_features list to get the current chunk - current_chunk = num_features[start_idx:end_idx] - - # If the current_chunk is not empty, process it - if current_chunk: - # Concatenate tensors in the current chunk along dimension 1 - chunk_tensor = torch.cat(current_chunk, dim=1) - # Apply the embedding layer to the chunk_tensor - num_embeddings.append(emb(chunk_tensor)) - - # Stack the resulting embeddings along the second dimension if num_embeddings is not empty - if num_embeddings: - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - else: - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - 
raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss, and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "train_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "val_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
-
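The regression head above is assembled dynamically from config.tabular_head_units, one linear layer plus activation plus dropout per entry, ending in a single output unit. A standalone sketch of that pattern follows; the unit list, dropout rate, and activation below are illustrative values, not library defaults.

import torch.nn as nn

def build_head(d_model, units, activation, dropout, out_dim=1):
    # Stack Linear -> activation -> Dropout for each hidden width, then a final Linear.
    layers, input_dim = [], d_model
    for u in units:
        layers += [nn.Linear(input_dim, u), activation, nn.Dropout(dropout)]
        input_dim = u
    layers.append(nn.Linear(input_dim, out_dim))  # final regression output
    return nn.Sequential(*layers)

head = build_head(d_model=64, units=[128, 64, 32], activation=nn.SELU(), dropout=0.1)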
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/regressor.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/regressor.html
deleted file mode 100644
index e1fa616..0000000
--- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/base_models/regressor.html
+++ /dev/null
@@ -1,425 +0,0 @@
-mambular.base_models.regressor — mamba-tabular 06.05.2024 documentation
Source code for mambular.base_models.regressor

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseMambularRegressor(pl.LightningModule): - """ - A base regression module for tabular data built on PyTorch Lightning. It incorporates embeddings - for categorical and numerical features with a configurable architecture provided by MambularConfig. - This module is designed for regression tasks. - - Parameters - ---------- - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the model architecture. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None. - lr : float, optional - The initial learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) coefficient. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to a single output for regression tasks. - train_mse : torchmetrics.MeanSquaredError - Metric computation module for training Mean Squared Error. - val_mse : torchmetrics.MeanSquaredError - Metric computation module for validation Mean Squared Error. - loss_fct : torch.nn.MSELoss - The loss function for regression tasks. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - - def __init__( - self, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, 1)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - self.loss_fct = nn.MSELoss() - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the regressor. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training: unpacks the features and labels, computes the loss, and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The training loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "train_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "val_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.weight_decay # use the weight_decay passed to __init__ so the constructor argument takes effect - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
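The dictionary returned above asks Lightning to call scheduler.step(val_loss) once per epoch. Outside Lightning, a roughly equivalent plain-PyTorch loop would look like the sketch below; train_one_epoch and validate are hypothetical placeholders, and the dummy val_loss only exists so the sketch runs on its own.

import torch

model = torch.nn.Linear(10, 1)  # placeholder module standing in for the Mambular network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.025)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="min", factor=0.75, patience=10
)

for epoch in range(100):
    # train_one_epoch(model, optimizer)  # placeholder: one pass over the training loader
    # val_loss = validate(model)         # placeholder: compute the monitored "val_loss"
    val_loss = 1.0 / (epoch + 1)         # dummy value so this sketch is runnable as-is
    scheduler.step(val_loss)             # mirrors monitor="val_loss", interval="epoch", frequency=1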
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_classifier.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_classifier.html deleted file mode 100644 index ca3427f..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_classifier.html +++ /dev/null @@ -1,707 +0,0 @@

Source code for mambular.models.sklearn_classifier

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-import numpy as np
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.classifier import BaseMambularClassifier
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.metrics import accuracy_score
-
-
-
[docs]class MambularClassifier(BaseEstimator): - """ - A classifier that mimics scikit-learn's API using PyTorch Lightning and a custom architecture. - - This classifier is designed to work with tabular data and provides a flexible interface for specifying model - configurations and preprocessing steps. It integrates smoothly with scikit-learn's utilities, such as cross-validation - and grid search. - - Parameters - ---------- - **kwargs : Various - Accepts any number of keyword arguments that are passed to the MambularConfig and Preprocessor classes. - Known configuration arguments for the model are extracted based on a predefined list, and the rest are - passed to the Preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object that holds model-specific settings. - preprocessor : Preprocessor - Preprocessor object for handling feature preprocessing like normalization and encoding. - model : BaseMambularClassifier or None - The underlying PyTorch Lightning model, instantiated upon calling the `fit` method. - """ - - def __init__(self, **kwargs): - # Known config arguments - print("Received kwargs:", kwargs) - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - "layer_norm_after_embedding", - ] - self.config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**self.config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. - - Parameters - ---------- - deep : bool, default=True - If True, will return the parameters for this estimator and contained subobjects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. - - Parameters - ---------- - **parameters : dict - Estimator parameters. - - Returns - ------- - self : object - Estimator instance. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - The input samples. - y : array-like of shape (n_samples,) - The target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data and create corresponding DataLoaders. - - Parameters - ---------- - X_train : array-like of shape (n_samples, n_features) - The training input samples. - y_train : array-like of shape (n_samples,) - The training target values. - X_val : array-like of shape (n_samples, n_features) - The validation input samples. - y_val : array-like of shape (n_samples,) - The validation target values. - batch_size : int - Size of mini-batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data before splitting into batches. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing training and validation DataLoaders. 
- """ - - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.long) - val_labels = torch.tensor(y_val, dtype=torch.long) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels, regression=False - ) - val_dataset = MambularDataset( - val_cat_tensors, val_num_tensors, val_labels, regression=False - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - **trainer_kwargs - ): - """ - Fit the model to the given training data, optionally using a separate validation set. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The training input samples. - y : array-like of shape (n_samples,) or (n_samples, n_outputs) - The target values (class labels in classification, real numbers in regression). - val_size : float, default=0.2 - The proportion of the dataset to include in the validation split if `X_val` is None. Ignored if `X_val` is provided. - X_val : array-like or pd.DataFrame of shape (n_samples, n_features), optional - The validation input samples. If provided, `X` and `y` are not split and this data is used for validation. - y_val : array-like of shape (n_samples,) or (n_samples, n_outputs), optional - The validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Seed used by the random number generator for shuffling the data if `X_val` is not provided. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch if `X_val` is not provided. - patience : int, default=10 - Number of epochs with no improvement after which training will be stopped if using early stopping. - monitor : str, default="val_loss" - Quantity to be monitored for early stopping. - mode : str, default="min" - One of {"min", "max"}. In "min" mode, training will stop when the quantity monitored has stopped decreasing; in "max" mode, it will stop when the quantity monitored has stopped increasing. - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement after which the learning rate will be reduced. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. new_lr = lr * factor. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) parameter. - **trainer_kwargs : dict - Additional keyword arguments to be passed to the PyTorch Lightning Trainer constructor. - - Returns - ------- - self : object - The fitted estimator. 
- """ - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - num_classes = len(np.unique(y)) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularClassifier( - num_classes=num_classes, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predict the class labels for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - - Returns - ------- - predictions : ndarray of shape (n_samples,) - Predicted class labels for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class labels. - The predictions are converted from a PyTorch tensor to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - predictions = torch.argmax(logits, dim=1) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def predict_proba(self, X): - """ - Predict class probabilities for the given input samples. 
- - Example - ------- - from sklearn.metrics import accuracy_score, precision_score, f1_score, roc_auc_score - - # Define the metrics you want to evaluate - metrics = { - 'Accuracy': (accuracy_score, False), - 'Precision': (precision_score, False), - 'F1 Score': (f1_score, False), - 'AUC Score': (roc_auc_score, True) - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = classifier.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples for which to predict class probabilities. - - Returns - ------- - probabilities : ndarray of shape (n_samples, n_classes) - Predicted class probabilities for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class probabilities. - Softmax is applied to the logits to obtain probabilities, which are then converted from a PyTorch tensor - to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - probabilities = torch.softmax(logits, dim=1) - - # Convert probabilities to NumPy array and return - return probabilities.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) - The true class labels against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are tuples containing the metric function - and a boolean indicating whether the metric requires probability scores (True) or class labels (False). - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses either the `predict` or `predict_proba` method depending on the metric requirements. - """ - # Ensure input is in the correct format - if metrics is None: - metrics = {"Accuracy": (accuracy_score, False)} - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Initialize dictionary to store results - scores = {} - - # Generate class probabilities if any metric requires them - if any(use_proba for _, use_proba in metrics.values()): - probabilities = self.predict_proba(X) - - # Generate class labels if any metric requires them - if any(not use_proba for _, use_proba in metrics.values()): - predictions = self.predict(X) - - # Compute each metric - for metric_name, (metric_func, use_proba) in metrics.items(): - if use_proba: - scores[metric_name] = metric_func(y_true, probabilities) - else: - scores[metric_name] = metric_func(y_true, predictions) - - return scores
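A minimal end-to-end sketch of this estimator on synthetic data follows; it assumes the package is importable as mambular under the module path shown in this listing, and the d_model/n_layers/max_epochs values are illustrative choices, not recommendations.

import pandas as pd
from sklearn.datasets import make_classification
from sklearn.metrics import accuracy_score, roc_auc_score
from mambular.models.sklearn_classifier import MambularClassifier

X, y = make_classification(n_samples=500, n_features=8, random_state=0)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(8)])

clf = MambularClassifier(d_model=32, n_layers=2)  # unrecognized kwargs would be passed to the Preprocessor
clf.fit(X, y, max_epochs=5, batch_size=64)

labels = clf.predict(X)       # class labels, shape (n_samples,)
proba = clf.predict_proba(X)  # class probabilities, shape (n_samples, n_classes)

# evaluate() expects {name: (metric_fn, needs_probabilities)}
scores = clf.evaluate(
    X,
    y,
    metrics={
        "Accuracy": (accuracy_score, False),
        "AUC": (lambda y_true, p: roc_auc_score(y_true, p[:, 1]), True),  # binary case: positive-class column
    },
)
print(scores)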
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_distributional.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_distributional.html deleted file mode 100644 index c989cf4..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_distributional.html +++ /dev/null @@ -1,676 +0,0 @@

Source code for mambular.models.sklearn_distributional

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.distributional import BaseMambularLSS
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from ..utils.distributional_metrics import (
-    poisson_deviance,
-    gamma_deviance,
-    beta_brier_score,
-    dirichlet_error,
-    student_t_loss,
-    negative_binomial_deviance,
-    inverse_gamma_loss,
-)
-from sklearn.metrics import mean_squared_error, accuracy_score
-import numpy as np
-import properscoring as ps
-
-
-
[docs]class MambularLSS(BaseEstimator): - """ - MambularLSS is a machine learning estimator that is designed for structured data, - incorporating both preprocessing and a deep learning model. The estimator - integrates configurable components for data preprocessing and the neural network model, - facilitating end-to-end training and prediction workflows. - - The initialization of this class separates configuration arguments for the model and - the preprocessor, allowing for flexible adjustment of parameters. - - Attributes - ---------- - config : MambularConfig - Configuration object containing model-specific parameters. - preprocessor : Preprocessor - Preprocessor object for data preprocessing steps. - model : torch.nn.Module - The neural network model, initialized based on 'config'. - - Parameters - ---------- - **kwargs : Arbitrary keyword arguments, divided into configuration for the model and - preprocessing. Recognized keys include model parameters such as 'd_model', - 'n_layers', etc., and any additional keys are assumed to be preprocessor arguments. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator, optionally including parameters from nested components - like the preprocessor. - - Parameters - ---------- - deep : bool, default=True - If True, return parameters of nested components. - - Returns - ------- - dict - A dictionary mapping parameter names to their values. For nested components, - parameter names are prefixed accordingly (e.g., 'preprocessor__<param_name>'). - """ - - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator, allowing for modifications to both the configuration - and preprocessor parameters. Parameters not recognized as configuration arguments are - assumed to be preprocessor arguments. - - Parameters - ---------- - **parameters: Arbitrary keyword arguments where keys are parameter names and values - are the new parameter values. - - Returns - ------- - self: This instance with updated parameters. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like - Features of the dataset. - y : array-like - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int, optional - The seed used by the random number generator for reproducibility. - - Returns - ------- - tuple - A tuple containing split datasets (X_train, X_val, y_train, y_val). - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data, fit the preprocessor on the training data, - and transform both training and validation data. This method also initializes tensors - for categorical and numerical features and labels, and prepares DataLoader objects for - both datasets. - - Parameters - ---------- - X_train : array-like - Training features. - y_train : array-like - Training target values. - X_val : array-like - Validation features. - y_val : array-like - Validation target values. - batch_size : int - Batch size for DataLoader objects. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - MambularDataModule - An object containing DataLoaders for training and validation datasets. 
- """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = MambularDataset(val_cat_tensors, val_num_tensors, val_labels) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocess test data using the fitted preprocessor. This method prepares tensors for - categorical and numerical features based on the preprocessed test data. - - Parameters - ---------- - X : array-like - Test features to preprocess. - - Returns - ------- - tuple - A tuple containing lists of tensors for categorical and numerical features. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - family, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - **trainer_kwargs, - ): - """ - Fits the model to the provided data, using the specified loss distribution family for the prediction task. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Training features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values for training. - family : str - The name of the distribution family to use for the loss function. Examples include 'normal' for regression tasks. - val_size : float, default=0.2 - Proportion of the dataset to include in the validation split if `X_val` is None. - X_val : DataFrame or array-like, shape (n_samples, n_features), optional - Validation features. If provided, `X` and `y` are not split. - y_val : array-like, shape (n_samples,) or (n_samples, n_targets), optional - Validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Seed used by the random number generator for shuffling the data. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch. - patience : int, default=10 - Number of epochs with no improvement on the validation metric to wait before early stopping. - monitor : str, default="val_loss" - The metric to monitor for early stopping. - mode : str, default="min" - In 'min' mode, training will stop when the quantity monitored has stopped decreasing; - in 'max' mode, it will stop when the quantity monitored has stopped increasing. - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement on the validation metric to wait before reducing the learning rate. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) parameter. - **trainer_kwargs : dict - Additional keyword arguments for PyTorch Lightning's Trainer class. - - Returns - ------- - self : object - The fitted estimator. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularLSS( - family=family, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", - mode="min", - save_top_k=1, - dirpath="model_checkpoints", - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs, - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples using the fitted model. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_distributional_parameters) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None, distribution_family=None): - """ - Evaluate the model on the given data using specified metrics tailored to the distribution type. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Input samples. - y_true : DataFrame or array-like, shape (n_samples,) or (n_samples, n_outputs) - True target values. - metrics : dict, optional - A dictionary where keys are metric names and values are the metric functions. - If None, default metrics based on the detected or specified distribution_family are used. - distribution_family : str, optional - Specifies the distribution family the model is predicting for. If None, it will attempt to infer based - on the model's settings. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. 
- """ - # Infer distribution family from model settings if not provided - if distribution_family is None: - distribution_family = getattr(self.model, "distribution_family", "normal") - - # Setup default metrics if none are provided - if metrics is None: - metrics = self.get_default_metrics(distribution_family) - - # Make predictions - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores - - def get_default_metrics(self, distribution_family): - """ - Provides default metrics based on the distribution family. - - Parameters - ---------- - distribution_family : str - The distribution family for which to provide default metrics. - - Returns - ------- - metrics : dict - A dictionary of default metric functions. - """ - default_metrics = { - "normal": { - "MSE": lambda y, pred: mean_squared_error(y, pred[:, 0]), - "CRPS": lambda y, pred: np.mean( - [ - ps.crps_gaussian(y[i], mu=pred[i, 0], sig=np.sqrt(pred[i, 1])) - for i in range(len(y)) - ] - ), - }, - "poisson": {"Poisson Deviance": poisson_deviance}, - "gamma": {"Gamma Deviance": gamma_deviance}, - "beta": {"Brier Score": beta_brier_score}, - "dirichlet": {"Dirichlet Error": dirichlet_error}, - "studentt": {"Student-T Loss": student_t_loss}, - "negativebinom": {"Negative Binomial Deviance": negative_binomial_deviance}, - "inversegamma": {"Inverse Gamma Loss": inverse_gamma_loss}, - "categorical": {"Accuracy": accuracy_score}, - } - return default_metrics.get(distribution_family, {})
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_classifier.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_classifier.html deleted file mode 100644 index 0004f95..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_classifier.html +++ /dev/null @@ -1,721 +0,0 @@

Source code for mambular.models.sklearn_embedding_classifier

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.embedding_classifier import BaseEmbeddingMambularClassifier
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, EmbeddingMambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.decomposition import PCA
-import numpy as np
-from sklearn.metrics import accuracy_score
-
-
-
[docs]class EmbeddingMambularClassifier(BaseEstimator): - """ - Provides an scikit-learn-like interface for the ProteinMambularClassifier, making it compatible with - scikit-learn's utilities and workflow. This class encapsulates the PyTorch Lightning model, preprocessing, - and data loading, offering methods for fitting, predicting, and probability estimation in a manner akin - to scikit-learn's API. - - Parameters - ---------- - **kwargs : Configuration parameters that can include both MambularConfig settings and preprocessing - options. Any unrecognized parameters are passed to the preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object for the model, storing architecture-specific parameters. - preprocessor : Preprocessor - Object handling data preprocessing steps such as feature encoding and normalization. - model : ProteinMambularClassifier - The underlying neural network model, instantiated during the `fit` method. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. - - Parameters - ---------- - deep : bool, default=True - If True, will return the parameters for this estimator and contained subobjects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. - - Parameters - ---------- - **parameters : dict - Estimator parameters. - - Returns - ------- - self : object - Estimator instance. - """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - The input samples. 
- y : array-like of shape (n_samples,) - The target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data and create corresponding DataLoaders. - - Parameters - ---------- - X_train : array-like of shape (n_samples, n_features) - The training input samples. - y_train : array-like of shape (n_samples,) - The training target values. - X_val : array-like of shape (n_samples, n_features) - The validation input samples. - y_val : array-like of shape (n_samples,) - The validation target values. - batch_size : int - Size of mini-batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data before splitting into batches. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing training and validation DataLoaders. - """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.long) - val_labels = torch.tensor(y_val, dtype=torch.long) - - # Create datasets - train_dataset = EmbeddingMambularDataset( - train_cat_tensors, train_num_tensors, train_labels, regression=False - ) - val_dataset = EmbeddingMambularDataset( - val_cat_tensors, val_num_tensors, val_labels, regression=False - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, 
batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. - """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + str( - key - ) # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + str(key) # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - raw_embeddings=False, - seq_size=20, - pca=False, - reduced_dims=16, - **trainer_kwargs - ): - """ - Fits the model to the given dataset. - - Parameters - ---------- - X : pandas DataFrame or array-like - Feature matrix for training. - y : array-like - Target vector. - val_size : float, optional - Fraction of the data to use for validation if X_val is None. - X_val : pandas DataFrame or array-like, optional - Feature matrix for validation. - y_val : array-like, optional - Target vector for validation. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, optional - Seed for random number generators. - batch_size : int, default=32 - Size of batches for training and validation. - shuffle : bool, default=True - Whether to shuffle training data before each epoch. - patience : int, default=10 - Patience for early stopping based on val_loss. - monitor : str, default='val_loss' - Metric to monitor for early stopping. - mode : str, default='min' - Mode for early stopping ('min' or 'max'). - lr : float, default=0.001 - Learning rate for the optimizer. - lr_patience : int, default=5 - Patience for learning rate reduction. - factor : float, default=0.1 - Factor for learning rate reduction. - weight_decay : float, default=0.0 - Weight decay for the optimizer. - raw_embeddings : bool, default=False - Whether to use raw features or embeddings. - seq_size : int, optional - Sequence size for embeddings, relevant if raw_embeddings is False. - **trainer_kwargs : dict - Additional arguments for the PyTorch Lightning Trainer. - - Returns - ------- - self : object - The fitted estimator. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - # Apply PCA if indicated - if pca: - pca_transformer = PCA(n_components=reduced_dims) - X = pca_transformer.fit_transform( - X - ) # Fit and transform the PCA on the complete dataset - if X_val is not None: - X_val = pca_transformer.transform( - X_val - ) # Transform validation data with the same PCA model - - raw_embeddings = True - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - else: - X_train = X - y_train = y - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - if raw_embeddings: - self.config.d_model = X.shape[1] - - num_classes = len(np.unique(y)) - - self.model = BaseEmbeddingMambularClassifier( - num_classes=num_classes, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - raw_embeddings=raw_embeddings, - seq_size=seq_size, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predict the class labels for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - - Returns - ------- - predictions : ndarray of shape (n_samples,) - Predicted class labels for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class labels. - The predictions are converted from a PyTorch tensor to a NumPy array before being returned. 
- """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - if hasattr(self, "pca_transformer"): - X = pd.DataFrame(self.pca_transformer.transform(X)) - - cat_tensors, num_tensors = self.preprocess_test_data(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - predictions = torch.argmax(logits, dim=1) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def predict_proba(self, X): - """ - Predict class probabilities for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples for which to predict class probabilities. - - Returns - ------- - probabilities : ndarray of shape (n_samples, n_classes) - Predicted class probabilities for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class probabilities. - Softmax is applied to the logits to obtain probabilities, which are then converted from a PyTorch tensor - to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - cat_tensors, num_tensors = self.preprocess_test_data(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - probabilities = torch.softmax(logits, dim=1) - - # Convert probabilities to NumPy array and return - return probabilities.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) - The true class labels against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are tuples containing the metric function - and a boolean indicating whether the metric requires probability scores (True) or class labels (False). - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses either the `predict` or `predict_proba` method depending on the metric requirements. 
- """ - # Ensure input is in the correct format - if metrics is None: - metrics = {"Accuracy": (accuracy_score, False)} - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Initialize dictionary to store results - scores = {} - - # Generate class probabilities if any metric requires them - if any(use_proba for _, use_proba in metrics.values()): - probabilities = self.predict_proba(X) - - # Generate class labels if any metric requires them - if any(not use_proba for _, use_proba in metrics.values()): - predictions = self.predict(X) - - # Compute each metric - for metric_name, (metric_func, use_proba) in metrics.items(): - if use_proba: - scores[metric_name] = metric_func(y_true, probabilities) - else: - scores[metric_name] = metric_func(y_true, predictions) - - return scores
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_regressor.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_regressor.html deleted file mode 100644 index 704112c..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_embedding_regressor.html +++ /dev/null @@ -1,665 +0,0 @@ - - - - - - mambular.models.sklearn_embedding_regressor — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Source code for mambular.models.sklearn_embedding_regressor

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.embedding_regressor import BaseEmbeddingMambularRegressor
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, EmbeddingMambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.decomposition import PCA
-from sklearn.metrics import mean_squared_error
-
-
-
[docs]class EmbeddingMambularRegressor(BaseEstimator): - """ - An sklearn-like interface for the ProteinMambularRegressor, making it compatible with sklearn's utilities - and workflows. This class wraps the PyTorch Lightning model and preprocessor, providing methods for fitting, - predicting, and setting/getting parameters in a way that mimics sklearn's API. - - Parameters - ---------- - **kwargs : Keyword arguments that can include both configuration parameters for the MambularConfig and - parameters for the preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object containing model-specific parameters. - preprocessor : Preprocessor - Preprocessor object for data preprocessing steps. - model : ProteinMambularRegressor - The neural network model, initialized after the `fit` method is called. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - - if not "numerical_preprocessing" in preprocessor_kwargs.keys(): - preprocessor_kwargs["numerical_preprocessing"] = "normalization" - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - deep : bool, default=True - If True, returns the parameters for this estimator and contained sub-objects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - **parameters : dict - Estimator parameters to be set. - - Returns - ------- - self : object - The instance with updated parameters. - """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Splits the dataset into training and validation sets. 
- - Parameters - ---------- - X : array-like or DataFrame, shape (n_samples, n_features) - Input features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocesses the training and validation data, and creates DataLoaders for them. - - Parameters - ---------- - X_train : DataFrame or array-like, shape (n_samples_train, n_features) - Training feature set. - y_train : array-like, shape (n_samples_train,) - Training target values. - X_val : DataFrame or array-like, shape (n_samples_val, n_features) - Validation feature set. - y_val : array-like, shape (n_samples_val,) - Validation target values. - batch_size : int - Size of batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing the training and validation DataLoaders. - """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = EmbeddingMambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = EmbeddingMambularDataset( - val_cat_tensors, val_num_tensors, val_labels - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, 
batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. - """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + str( - key - ) # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + str(key) # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - raw_embeddings=False, - seq_size=20, - pca=False, - **trainer_kwargs - ): - """ - Fits the ProteinMambularRegressor model to the training data. - - Parameters - ---------- - X : array-like or DataFrame - The training input samples. - y : array-like - The target values (class labels for classification, real numbers for regression). - val_size : float, optional - The proportion of the dataset to include in the validation split if `X_val` is not provided. - X_val : array-like or DataFrame, optional - The validation input samples. - y_val : array-like, optional - The validation target values. - max_epochs : int, optional - The maximum number of epochs for training. - random_state : int, optional - The seed used by the random number generator. - batch_size : int, optional - Size of the batches for training. - shuffle : bool, optional - Whether to shuffle the training data. - patience : int, optional - Patience for early stopping. - monitor : str, optional - Quantity to be monitored for early stopping. - mode : str, optional - One of {'auto', 'min', 'max'}. In 'min' mode, training will stop when the quantity monitored has stopped decreasing; - in 'max' mode, it will stop when the quantity monitored has stopped increasing. - lr : float, optional - Learning rate for the optimizer. - lr_patience : int, optional - Number of epochs with no improvement after which the learning rate will be reduced. - factor : float, optional - Factor by which the learning rate will be reduced. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. - raw_embeddings : bool, optional - Whether to use raw numerical features directly or to process them into embeddings. 
- seq_size : int, optional - The sequence size for processing numerical features when not using raw embeddings. - **trainer_kwargs : dict - Additional keyword arguments for the PyTorch Lightning Trainer. - - Returns - ------- - self : object - Returns an instance of self. - """ - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - # Apply PCA if indicated - if pca: - self.pca_transformer = PCA(n_components=seq_size) - X = pd.DataFrame( - self.pca_transformer.fit_transform(X) - ) # Fit and transform the PCA on the complete dataset - if X_val is not None: - X_val = pd.DataFrame( - self.pca_transformer.transform(X_val) - ) # Transform validation data with the same PCA model - - raw_embeddings = True - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - else: - X_train = X - y_train = y - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - if raw_embeddings: - self.config.d_model = X.shape[1] - - self.model = BaseEmbeddingMambularRegressor( - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - raw_embeddings=raw_embeddings, - seq_size=seq_size, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_outputs) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - if hasattr(self, "pca_transformer"): - X = pd.DataFrame(self.pca_transformer.transform(X)) - - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. 
- - Example: - metrics = { - 'Mean Squared Error': mean_squared_error, - 'R2 Score': r2_score - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = regressor.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) - The true target values against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are the metric functions. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses the `predict` method to generate predictions and computes each metric. - """ - if metrics is None: - metrics = {"Mean Squared Error": mean_squared_error} - - # Ensure input is in the correct format - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Generate predictions using the trained model - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores
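To make the workflow above concrete, a short usage sketch follows. It assumes the class is importable as `mambular.models.EmbeddingMambularRegressor` (matching the re-exports used elsewhere in these docs), and `X`/`y` are placeholder training data; the hyperparameter values are illustrative only.

```python
# Hedged sketch of the sklearn-style workflow for EmbeddingMambularRegressor;
# the import path mirrors the other estimators and X/y are placeholder data.
from mambular.models import EmbeddingMambularRegressor
from sklearn.metrics import mean_squared_error, r2_score

reg = EmbeddingMambularRegressor(d_model=64, n_layers=4, dropout=0.1)

# pca=True reduces the features to seq_size principal components and switches
# the model to raw embeddings, as implemented in fit() above.
reg.fit(X, y, max_epochs=50, batch_size=64, patience=10, pca=True, seq_size=20)

preds = reg.predict(X)
scores = reg.evaluate(X, y, metrics={"MSE": mean_squared_error, "R2": r2_score})
```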
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_regressor.html b/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_regressor.html deleted file mode 100644 index b7cda36..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_modules/mambular/models/sklearn_regressor.html +++ /dev/null @@ -1,626 +0,0 @@ - - - - - - mambular.models.sklearn_regressor — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Source code for mambular.models.sklearn_regressor

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.regressor import BaseMambularRegressor
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.metrics import mean_squared_error
-
-
-
[docs]class MambularRegressor(BaseEstimator): - """ - A regressor implemented using PyTorch Lightning that follows the scikit-learn API conventions. This class is designed - to work with tabular data, offering a straightforward way to specify model configurations and preprocessing steps. It - integrates seamlessly with scikit-learn's tools such as cross-validation and grid search. - - Parameters - ---------- - **kwargs : Various - Accepts any number of keyword arguments. Arguments recognized as model configuration options are passed to the - MambularConfig constructor. Remaining arguments are assumed to be preprocessor options and passed to the - Preprocessor constructor. - - Attributes - ---------- - config : MambularConfig - An object storing the configuration settings for the model. - preprocessor : Preprocessor - An object responsible for preprocessing the input data, such as encoding categorical variables and scaling numerical features. - model : BaseMambularRegressor or None - The underlying regression model, which is a PyTorch Lightning module. It is instantiated when the `fit` method is called. - """ - - def __init__(self, **kwargs): - # Known config arguments - print("Received kwargs:", kwargs) - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - "layer_norm_after_embedding", - ] - self.config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**self.config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - deep : bool, default=True - If True, returns the parameters for this estimator and contained sub-objects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - **parameters : dict - Estimator parameters to be set. - - Returns - ------- - self : object - The instance with updated parameters. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Splits the dataset into training and validation sets. - - Parameters - ---------- - X : array-like or DataFrame, shape (n_samples, n_features) - Input features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocesses the training and validation data, and creates DataLoaders for them. - - Parameters - ---------- - X_train : DataFrame or array-like, shape (n_samples_train, n_features) - Training feature set. - y_train : array-like, shape (n_samples_train,) - Training target values. - X_val : DataFrame or array-like, shape (n_samples_val, n_features) - Validation feature set. - y_val : array-like, shape (n_samples_val,) - Validation target values. - batch_size : int - Size of batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing the training and validation DataLoaders. 
- """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = MambularDataset(val_cat_tensors, val_num_tensors, val_labels) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=128, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=5, - factor=0.75, - weight_decay=1e-06, - **trainer_kwargs - ): - """ - Trains the regression model using the provided training data. Optionally, a separate validation set can be used. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The training input samples. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - The target values (real numbers). - val_size : float, default=0.2 - The proportion of the dataset to include in the validation split if `X_val` is None. Ignored if `X_val` is provided. - X_val : DataFrame or array-like, shape (n_samples, n_features), optional - The validation input samples. If provided, `X` and `y` are not split and this data is used for validation. - y_val : array-like, shape (n_samples,) or (n_samples, n_targets), optional - The validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Controls the shuffling applied to the data before applying the split. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch. - patience : int, default=10 - Number of epochs with no improvement on the validation loss to wait before early stopping. - monitor : str, default="val_loss" - The metric to monitor for early stopping. - mode : str, default="min" - Whether the monitored metric should be minimized (`min`) or maximized (`max`). - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement on the validation loss to wait before reducing the learning rate. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) coefficient. - **trainer_kwargs : Additional keyword arguments for PyTorch Lightning's Trainer class. - - Returns - ------- - self : object - The fitted regressor. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularRegressor( - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_outputs) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Example: - metrics = { - 'Mean Squared Error': mean_squared_error, - 'R2 Score': r2_score - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = regressor.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) - The true target values against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are the metric functions. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses the `predict` method to generate predictions and computes each metric. 
- """ - if metrics is None: - metrics = {"Mean Squared Error": mean_squared_error} - - # Ensure input is in the correct format - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Generate predictions using the trained model - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/BaseModels.rst.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/BaseModels.rst.txt deleted file mode 100644 index 9d20f8b..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/BaseModels.rst.txt +++ /dev/null @@ -1,23 +0,0 @@ -Base Models -=========== - -.. autoclass:: mambular.base_models.classifier.BaseMambularClassifier - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.distributional.BaseMambularLSS - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.regressor.BaseMambularRegressor - :members: - :undoc-members: - :no-inherited-members: diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/index.rst.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/index.rst.txt deleted file mode 100644 index 3aae198..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/base_models/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. -*- mode: rst -*- - -.. currentmodule:: mambular.base_models - -BaseModels -========== - -This module provides base classes for the Mambular models. - -========================================= =================================================================================================== -Functionality Description -========================================= =================================================================================================== -:class:`BaseMambularClassifier` Multi-class and binary classification tasks. -:class:`BaseMambularLSS` Various statistical distribution families for different types of regression and classification tasks. -:class:`Base EmbeddingMambularClassifier` Specialized classification module for complex protein sequence data. -:class:`BaseEmbeddingMambularRegressor` Specialized regression module for complex protein sequence data. -:class:`BaseMambularRegressor` Regression tasks. -========================================= =================================================================================================== - - -.. toctree:: - :maxdepth: 1 - :hidden: - - BaseModels - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/Models.rst.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/Models.rst.txt deleted file mode 100644 index 017a589..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/Models.rst.txt +++ /dev/null @@ -1,8 +0,0 @@ -Models -=========== - -.. autoclass:: mambular.models.sklearn_classifier.MambularClassifier -.. autoclass:: mambular.models.sklearn_distributional.MambularLSS -.. autoclass:: mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier -.. autoclass:: mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor -.. autoclass:: mambular.models.sklearn_regressor.MambularRegressor diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/index.rst.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/index.rst.txt deleted file mode 100644 index 76e901f..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/api/models/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. -*- mode: rst -*- - -.. 
currentmodule:: mambular.models - -Models -====== - -This module provides classes for the Mambular models that adhere to scikit-learn's `BaseEstimator` interface. - -======================================= =================================================================================================== -Functionality Description -======================================= =================================================================================================== -:class:`MambularClassifier` Multi-class and binary classification tasks. -:class:`MambularLSS` Various statistical distribution families for different types of regression and classification tasks. -:class:`EmbeddingMambularClassifier` Specialized classification module for complex protein sequence data. -:class:`EmbeddingMambularRegressor` Specialized regression module for complex protein sequence data. -:class:`MambularRegressor` Regression tasks. -======================================= =================================================================================================== - -.. toctree:: - :maxdepth: 1 - :hidden: - - Models - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/index.rst.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/index.rst.txt deleted file mode 100644 index 264a8d0..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/index.rst.txt +++ /dev/null @@ -1,27 +0,0 @@ -.. mamba-tabular documentation master file, created by - sphinx-quickstart on Mon May 6 16:16:57 2024. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -.. mdinclude:: mamba.md - -.. toctree:: - :name: Getting Started - :caption: Getting Started - :maxdepth: 2 - :hidden: - - mamba - installation - quickstart - -.. toctree:: - :name: API Docs - :caption: API Docs - :maxdepth: 1 - :hidden: - - - api/models/index - api/base_models/index - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/installation.md.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/installation.md.txt deleted file mode 100644 index d3e36df..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/installation.md.txt +++ /dev/null @@ -1,14 +0,0 @@ -# Installation - -Please follow the steps below for installing `mambular` - -Install from the source: - -```bash -cd mamba-tabular -pip install . -``` - -Note: Make sure you in the same directory where `setup.py` file resides. - -This package is so far not available in PyPi. diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/mamba.md.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/mamba.md.txt deleted file mode 100644 index 9df572f..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/mamba.md.txt +++ /dev/null @@ -1,136 +0,0 @@ -# Mamba-Tabluar - -Mambular is a Python package that brings the power of Mamba architectures to tabular data, offering a suite of deep learning models for regression, classification, and distributional regression tasks. Designed with ease of use in mind, Mambular models adhere to scikit-learn's `BaseEstimator` interface, making them highly compatible with the familiar scikit-learn ecosystem. This means you can fit, predict, and transform using Mambular models just as you would with any traditional scikit-learn model, but with the added performance and flexibility of deep learning. - -## Features - -- **Comprehensive Model Suite**: Includes modules for regression (`MambularRegressor`), classification (`MambularClassifier`), and distributional regression (`MambularLSS`), catering to a wide range of tabular data tasks. 
-- **State-of-the-Art Architectures**: Leverages the Mamba architecture, known for its effectiveness in handling sequential and time-series data within a state-space modeling framework, adapted here for tabular data. -- **Seamless Integration**: Designed to work effortlessly with scikit-learn, allowing for easy inclusion in existing machine learning pipelines, cross-validation, and hyperparameter tuning workflows. -- **Extensive Preprocessing**: Comes with a powerful preprocessing module that supports a broad array of data transformation techniques, ensuring that your data is optimally prepared for model training. -- **Sklearn-like API**: The familiar scikit-learn `fit`, `predict`, and `predict_proba` methods mean minimal learning curve for those already accustomed to scikit-learn. -- **PyTorch Lightning Under the Hood**: Built on top of PyTorch Lightning, Mambular models benefit from streamlined training processes, easy customization, and advanced features like distributed training and 16-bit precision. - -## Installation - -Install Mambular using pip: -```sh -pip install mambular -``` - -## Advanced Preprocessing for Optimal Performance - -Mambular elevates the preprocessing stage of model development, employing a sophisticated suite of techniques to ensure your data is in the best shape for the Mamba architectures. Our preprocessing module is designed to be both powerful and intuitive, offering a range of options to transform your tabular data efficiently. - -### Intelligent Data Type Detection and Transformation - -Mambular automatically identifies the type of each feature in your dataset, applying the most suitable transformations to numerical and categorical variables. This includes: - -- **Ordinal Encoding**: Categorical features are seamlessly transformed into numerical values, preserving their inherent order and making them model-ready. -- **One-Hot Encoding**: For nominal data, Mambular employs one-hot encoding to capture the presence or absence of categories without imposing ordinality. -- **Binning**: Numerical features can be discretized into bins, a useful technique for handling continuous variables in certain modeling contexts. -- **Decision Tree Binning**: Optionally, Mambular can use decision trees to find the optimal binning strategy for numerical features, enhancing model interpretability and performance. -- **Normalization**: Mambular can easily handle numerical features without specifically turning them into categorical features. Standard preprocessing steps such as normalization per feature are possible -- **Standardization**: Similarly, Standardization instead of Normalization can be used. - - -### Handling Missing Values - -Our preprocessing pipeline gracefully handles missing data, employing strategies like mean imputation for numerical features and mode imputation for categorical ones, ensuring that your models receive complete data inputs without manual intervention. - -### Flexible and Customizable - -While Mambular excels in automating the preprocessing workflow, it also offers flexibility. You can customize the preprocessing steps to fit the unique needs of your dataset, ensuring that you're not locked into a one-size-fits-all approach. - -By integrating Mambular's preprocessing module into your workflow, you're not just preparing your data for deep learning; you're optimizing it for excellence. This commitment to data quality is what sets Mambular apart, making it an indispensable tool in your machine learning arsenal. 
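
As a hedged illustration of how these preprocessing choices are wired in: any keyword argument that a Mambular estimator does not recognize as a model configuration option is forwarded to its internal `Preprocessor`. The value `"normalization"` is the one used in these docs' own examples; other option strings would need to be checked against the `Preprocessor` implementation.

```python
from mambular.models import MambularRegressor

# d_model / n_layers are model configuration options; the remaining keyword
# argument is routed to the Preprocessor. "normalization" is the value used
# in the docs' own examples (other option strings are not verified here).
model = MambularRegressor(
    d_model=64,
    n_layers=4,
    numerical_preprocessing="normalization",
)
```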
- - -## Fit a Model -Fitting a model in mambular is as simple as it gets. All models in mambular are sklearn BaseEstimators. Thus the `.fit` method is implemented for all of them. Additionally, this allows for using all other sklearn inherent methods such as their built in hyperparameter optimization tools. - -```python -from mambular.models import MambularClassifier -# Initialize and fit your model -model = MambularClassifier( - dropout=0.01, - d_model=128, - n_layers=6, - numerical_preprocessing="normalization", -) - -# X can be a dataframe or something that can be easily transformed into a pd.DataFrame as a np.array -model.fit(X, y, max_epochs=500, lr=1e-03, patience=25) -``` - -Predictions are also easily obtained: -```python -# simple predictions -preds = model.predict(X) - -# Predict probabilities -preds = model.predict_proba(X) -``` - - -## Distributional Regression with MambularLSS - -Mambular introduces a cutting-edge approach to distributional regression through its `MambularLSS` module, empowering users to model the full distribution of a response variable, not just its mean. This method is particularly valuable in scenarios where understanding the variability, skewness, or kurtosis of the response distribution is as crucial as predicting its central tendency. - -### Key Features of MambularLSS: - -- **Full Distribution Modeling**: Unlike traditional regression models that predict a single value (e.g., the mean), `MambularLSS` models the entire distribution of the response variable. This allows for more informative predictions, including quantiles, variance, and higher moments. -- **Customizable Distribution Types**: `MambularLSS` supports a variety of distribution families (e.g., Gaussian, Poisson, Binomial), making it adaptable to different types of response variables, from continuous to count data. -- **Location, Scale, Shape Parameters**: The model predicts parameters corresponding to the location, scale, and shape of the distribution, offering a nuanced understanding of the data's underlying distributional characteristics. -- **Enhanced Predictive Uncertainty**: By modeling the full distribution, `MambularLSS` provides richer information on predictive uncertainty, enabling more robust decision-making processes in uncertain environments. - - -### Available Distribution Classes: - -`MambularLSS` offers a wide range of distribution classes to cater to various statistical modeling needs. The available distribution classes include: - -- `normal`: Normal Distribution for modeling continuous data with a symmetric distribution around the mean. -- `poisson`: Poisson Distribution for modeling count data that represents the number of events occurring within a fixed interval. -- `gamma`: Gamma Distribution for modeling continuous data that is skewed and bounded at zero, often used for waiting times. -- `beta`: Beta Distribution for modeling data that is bounded between 0 and 1, useful for proportions and percentages. -- `dirichlet`: Dirichlet Distribution for modeling multivariate data where individual components are correlated, and the sum is constrained to 1. -- `studentt`: Student's T-Distribution for modeling data with heavier tails than the normal distribution, useful when the sample size is small. -- `negativebinom`: Negative Binomial Distribution for modeling count data with over-dispersion relative to the Poisson distribution. -- `inversegamma`: Inverse Gamma Distribution, often used as a prior distribution in Bayesian inference for scale parameters. 
-- `categorical`: Categorical Distribution for modeling categorical data with more than two categories. - -These distribution classes allow `MambularLSS` to flexibly model a wide variety of data types and distributions, providing users with the tools needed to capture the full complexity of their data. - - -### Use Cases for MambularLSS: - -- **Risk Assessment**: In finance or insurance, understanding the range and likelihood of potential losses is as important as predicting average outcomes. -- **Demand Forecasting**: For inventory management, capturing the variability in product demand helps in optimizing stock levels. -- **Personalized Medicine**: In healthcare, distributional regression can predict a range of possible patient responses to a treatment, aiding in personalized therapy planning. - -### Getting Started with MambularLSS: - -To integrate distributional regression into your workflow with `MambularLSS`, start by initializing the model with your desired configuration, similar to other Mambular models: - -```python -from mambular.models import MambularLSS - -# Initialize the MambularLSS model -model = MambularLSS( - dropout=0.2, - d_model=256, - n_layers=4, - -) - -# Fit the model to your data -model.fit( - X, - y, - max_epochs=300, - lr=1e-03, - patience=10, - family="normal" # define your distribution - ) - -``` - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_sources/quickstart.md.txt b/docs/$READTHEDOCS_OUTPUT/html/_sources/quickstart.md.txt deleted file mode 100644 index 5004232..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_sources/quickstart.md.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Quickstart - -Example code for implementing the models: - -1. [Classification](https://github.com/basf/mamba-tabular/blob/master/example_classification.py) -2. [Distributional](https://github.com/basf/mamba-tabular/blob/master/example_distributional.py) -3. [Embedding Regression](https://github.com/basf/mamba-tabular/blob/master/example_embedding_regression.py) -4. [Regression](https://github.com/basf/mamba-tabular/blob/master/example_regression.py) - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/_sphinx_javascript_frameworks_compat.js b/docs/$READTHEDOCS_OUTPUT/html/_static/_sphinx_javascript_frameworks_compat.js deleted file mode 100644 index 8141580..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/_sphinx_javascript_frameworks_compat.js +++ /dev/null @@ -1,123 +0,0 @@ -/* Compatability shim for jQuery and underscores.js. - * - * Copyright Sphinx contributors - * Released under the two clause BSD licence - */ - -/** - * small helper function to urldecode strings - * - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL - */ -jQuery.urldecode = function(x) { - if (!x) { - return x - } - return decodeURIComponent(x.replace(/\+/g, ' ')); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. 
- */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/basic.css b/docs/$READTHEDOCS_OUTPUT/html/_static/basic.css deleted file mode 100644 index 7577acb..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/basic.css +++ /dev/null @@ -1,903 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li p.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - 
-div.body { - min-width: 360px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, figure.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, figure.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, figure.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, figure.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar, -aside.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} - -nav.contents, -aside.topic, -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ - -nav.contents, -aside.topic, -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -aside.sidebar > :last-child, -nav.contents > :last-child, -aside.topic > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -aside.sidebar::after, -nav.contents::after, -aside.topic::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - 
border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure, figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption, figcaption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number, -figcaption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text, -figcaption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - -/* -- object description styles --------------------------------------------- */ - -.sig { - font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; -} - -.sig-name, code.descname { - background-color: transparent; - font-weight: bold; -} - -.sig-name { - font-size: 1.1em; -} - -code.descname { - font-size: 1.2em; -} - -.sig-prename, code.descclassname { - background-color: transparent; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.sig-param.n { - font-style: italic; -} - -/* C++ specific styling */ - -.sig-inline.c-texpr, -.sig-inline.cpp-texpr { - font-family: unset; -} - -.sig.c .k, .sig.c .kt, -.sig.cpp .k, .sig.cpp .kt { - color: #0033B3; -} - -.sig.c .m, -.sig.cpp .m { - color: #1750EB; -} - -.sig.c .s, .sig.c .sc, -.sig.cpp .s, .sig.cpp .sc { - color: #067D17; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -aside.footnote > span, -div.citation > span { - float: left; -} -aside.footnote > span:last-of-type, -div.citation > span:last-of-type { - padding-right: 0.5em; -} -aside.footnote > p { - margin-left: 2em; -} -div.citation > p { - margin-left: 4em; -} -aside.footnote > p:last-of-type, -div.citation > p:last-of-type { - margin-bottom: 0em; -} -aside.footnote > p:last-of-type:after, -div.citation > p:last-of-type:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; 
- word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0 0.5em; - content: ":"; - display: inline-block; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class*="highlight-"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; - white-space: nowrap; -} - -div[class*="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -span.linenos, -div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; - -webkit-user-select: text; /* Safari fallback only */ - -webkit-user-select: none; /* Chrome/Safari */ - -moz-user-select: none; /* Firefox */ - -ms-user-select: none; /* IE10+ */ -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - 
-span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/check-solid.svg b/docs/$READTHEDOCS_OUTPUT/html/_static/check-solid.svg deleted file mode 100644 index 92fad4b..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/check-solid.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/clipboard.min.js b/docs/$READTHEDOCS_OUTPUT/html/_static/clipboard.min.js deleted file mode 100644 index 54b3c46..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/clipboard.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * clipboard.js v2.0.8 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 - - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.css b/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.css deleted file mode 100644 index f1916ec..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.css +++ /dev/null @@ -1,94 +0,0 @@ -/* Copy buttons */ -button.copybtn { - position: absolute; - display: flex; - top: .3em; - right: .3em; - width: 1.7em; - height: 1.7em; - opacity: 0; - transition: opacity 0.3s, border .3s, background-color .3s; - user-select: none; - padding: 0; - border: none; - outline: none; - border-radius: 0.4em; - /* The colors that GitHub uses */ - border: #1b1f2426 1px solid; - background-color: #f6f8fa; - color: #57606a; -} - -button.copybtn.success { - border-color: #22863a; - color: #22863a; -} - -button.copybtn svg { - stroke: currentColor; - width: 1.5em; - height: 1.5em; - padding: 0.1em; -} - -div.highlight { - position: relative; -} - -/* Show the copybutton */ -.highlight:hover button.copybtn, button.copybtn.success { - opacity: 1; -} - -.highlight button.copybtn:hover { - background-color: rgb(235, 235, 235); -} - -.highlight button.copybtn:active { - background-color: rgb(187, 187, 187); -} - -/** - * A minimal CSS-only tooltip copied from: - * https://codepen.io/mildrenben/pen/rVBrpK - * - * To use, write HTML like the following: - * - *

<p class="o-tooltip--left" data-tooltip="Hey">Short</p>
- */ - .o-tooltip--left { - position: relative; - } - - .o-tooltip--left:after { - opacity: 0; - visibility: hidden; - position: absolute; - content: attr(data-tooltip); - padding: .2em; - font-size: .8em; - left: -.2em; - background: grey; - color: white; - white-space: nowrap; - z-index: 2; - border-radius: 2px; - transform: translateX(-102%) translateY(0); - transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); -} - -.o-tooltip--left:hover:after { - display: block; - opacity: 1; - visibility: visible; - transform: translateX(-100%) translateY(0); - transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); - transition-delay: .5s; -} - -/* By default the copy button shouldn't show up when printing a page */ -@media print { - button.copybtn { - display: none; - } -} diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.js b/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.js deleted file mode 100644 index 2ea7ff3..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton.js +++ /dev/null @@ -1,248 +0,0 @@ -// Localization support -const messages = { - 'en': { - 'copy': 'Copy', - 'copy_to_clipboard': 'Copy to clipboard', - 'copy_success': 'Copied!', - 'copy_failure': 'Failed to copy', - }, - 'es' : { - 'copy': 'Copiar', - 'copy_to_clipboard': 'Copiar al portapapeles', - 'copy_success': '¡Copiado!', - 'copy_failure': 'Error al copiar', - }, - 'de' : { - 'copy': 'Kopieren', - 'copy_to_clipboard': 'In die Zwischenablage kopieren', - 'copy_success': 'Kopiert!', - 'copy_failure': 'Fehler beim Kopieren', - }, - 'fr' : { - 'copy': 'Copier', - 'copy_to_clipboard': 'Copier dans le presse-papier', - 'copy_success': 'Copié !', - 'copy_failure': 'Échec de la copie', - }, - 'ru': { - 'copy': 'Скопировать', - 'copy_to_clipboard': 'Скопировать в буфер', - 'copy_success': 'Скопировано!', - 'copy_failure': 'Не удалось скопировать', - }, - 'zh-CN': { - 'copy': '复制', - 'copy_to_clipboard': '复制到剪贴板', - 'copy_success': '复制成功!', - 'copy_failure': '复制失败', - }, - 'it' : { - 'copy': 'Copiare', - 'copy_to_clipboard': 'Copiato negli appunti', - 'copy_success': 'Copiato!', - 'copy_failure': 'Errore durante la copia', - } -} - -let locale = 'en' -if( document.documentElement.lang !== undefined - && messages[document.documentElement.lang] !== undefined ) { - locale = document.documentElement.lang -} - -let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; -if (doc_url_root == '#') { - doc_url_root = ''; -} - -/** - * SVG files for our copy buttons - */ -let iconCheck = ` - ${messages[locale]['copy_success']} - - -` - -// If the user specified their own SVG use that, otherwise use the default -let iconCopy = ``; -if (!iconCopy) { - iconCopy = ` - ${messages[locale]['copy_to_clipboard']} - - - -` -} - -/** - * Set up copy/paste for code blocks - */ - -const runWhenDOMLoaded = cb => { - if (document.readyState != 'loading') { - cb() - } else if (document.addEventListener) { - document.addEventListener('DOMContentLoaded', cb) - } else { - document.attachEvent('onreadystatechange', function() { - if (document.readyState == 'complete') cb() - }) - } -} - -const codeCellId = index => `codecell${index}` - -// Clears selected text since ClipboardJS will select the text when copying -const clearSelection = () => { - if (window.getSelection) { - window.getSelection().removeAllRanges() - } else if (document.selection) { - document.selection.empty() - } -} - -// Changes tooltip text for a moment, then changes it 
back -// We want the timeout of our `success` class to be a bit shorter than the -// tooltip and icon change, so that we can hide the icon before changing back. -var timeoutIcon = 2000; -var timeoutSuccessClass = 1500; - -const temporarilyChangeTooltip = (el, oldText, newText) => { - el.setAttribute('data-tooltip', newText) - el.classList.add('success') - // Remove success a little bit sooner than we change the tooltip - // So that we can use CSS to hide the copybutton first - setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) - setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) -} - -// Changes the copy button icon for two seconds, then changes it back -const temporarilyChangeIcon = (el) => { - el.innerHTML = iconCheck; - setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) -} - -const addCopyButtonToCodeCells = () => { - // If ClipboardJS hasn't loaded, wait a bit and try again. This - // happens because we load ClipboardJS asynchronously. - if (window.ClipboardJS === undefined) { - setTimeout(addCopyButtonToCodeCells, 250) - return - } - - // Add copybuttons to all of our code cells - const COPYBUTTON_SELECTOR = 'div.highlight pre'; - const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) - codeCells.forEach((codeCell, index) => { - const id = codeCellId(index) - codeCell.setAttribute('id', id) - - const clipboardButton = id => - `` - codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) - }) - -function escapeRegExp(string) { - return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string -} - -/** - * Removes excluded text from a Node. - * - * @param {Node} target Node to filter. - * @param {string} exclude CSS selector of nodes to exclude. - * @returns {DOMString} Text from `target` with text removed. - */ -function filterText(target, exclude) { - const clone = target.cloneNode(true); // clone as to not modify the live DOM - if (exclude) { - // remove excluded nodes - clone.querySelectorAll(exclude).forEach(node => node.remove()); - } - return clone.innerText; -} - -// Callback when a copy button is clicked. Will be passed the node that was clicked -// should then grab the text and replace pieces of text that shouldn't be used in output -function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { - var regexp; - var match; - - // Do we check for line continuation characters and "HERE-documents"? 
- var useLineCont = !!lineContinuationChar - var useHereDoc = !!hereDocDelim - - // create regexp to capture prompt and remaining line - if (isRegexp) { - regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') - } else { - regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') - } - - const outputLines = []; - var promptFound = false; - var gotLineCont = false; - var gotHereDoc = false; - const lineGotPrompt = []; - for (const line of textContent.split('\n')) { - match = line.match(regexp) - if (match || gotLineCont || gotHereDoc) { - promptFound = regexp.test(line) - lineGotPrompt.push(promptFound) - if (removePrompts && promptFound) { - outputLines.push(match[2]) - } else { - outputLines.push(line) - } - gotLineCont = line.endsWith(lineContinuationChar) & useLineCont - if (line.includes(hereDocDelim) & useHereDoc) - gotHereDoc = !gotHereDoc - } else if (!onlyCopyPromptLines) { - outputLines.push(line) - } else if (copyEmptyLines && line.trim() === '') { - outputLines.push(line) - } - } - - // If no lines with the prompt were found then just use original lines - if (lineGotPrompt.some(v => v === true)) { - textContent = outputLines.join('\n'); - } - - // Remove a trailing newline to avoid auto-running when pasting - if (textContent.endsWith("\n")) { - textContent = textContent.slice(0, -1) - } - return textContent -} - - -var copyTargetText = (trigger) => { - var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); - - // get filtered text - let exclude = '.linenos'; - - let text = filterText(target, exclude); - return formatCopyText(text, '', false, true, true, true, '', '') -} - - // Initialize with a callback so we can modify the text before copy - const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) - - // Update UI with error/success messages - clipboard.on('success', event => { - clearSelection() - temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) - temporarilyChangeIcon(event.trigger) - }) - - clipboard.on('error', event => { - temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) - }) -} - -runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton_funcs.js b/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton_funcs.js deleted file mode 100644 index dbe1aaa..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/copybutton_funcs.js +++ /dev/null @@ -1,73 +0,0 @@ -function escapeRegExp(string) { - return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string -} - -/** - * Removes excluded text from a Node. - * - * @param {Node} target Node to filter. - * @param {string} exclude CSS selector of nodes to exclude. - * @returns {DOMString} Text from `target` with text removed. - */ -export function filterText(target, exclude) { - const clone = target.cloneNode(true); // clone as to not modify the live DOM - if (exclude) { - // remove excluded nodes - clone.querySelectorAll(exclude).forEach(node => node.remove()); - } - return clone.innerText; -} - -// Callback when a copy button is clicked. 
Will be passed the node that was clicked -// should then grab the text and replace pieces of text that shouldn't be used in output -export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { - var regexp; - var match; - - // Do we check for line continuation characters and "HERE-documents"? - var useLineCont = !!lineContinuationChar - var useHereDoc = !!hereDocDelim - - // create regexp to capture prompt and remaining line - if (isRegexp) { - regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') - } else { - regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') - } - - const outputLines = []; - var promptFound = false; - var gotLineCont = false; - var gotHereDoc = false; - const lineGotPrompt = []; - for (const line of textContent.split('\n')) { - match = line.match(regexp) - if (match || gotLineCont || gotHereDoc) { - promptFound = regexp.test(line) - lineGotPrompt.push(promptFound) - if (removePrompts && promptFound) { - outputLines.push(match[2]) - } else { - outputLines.push(line) - } - gotLineCont = line.endsWith(lineContinuationChar) & useLineCont - if (line.includes(hereDocDelim) & useHereDoc) - gotHereDoc = !gotHereDoc - } else if (!onlyCopyPromptLines) { - outputLines.push(line) - } else if (copyEmptyLines && line.trim() === '') { - outputLines.push(line) - } - } - - // If no lines with the prompt were found then just use original lines - if (lineGotPrompt.some(v => v === true)) { - textContent = outputLines.join('\n'); - } - - // Remove a trailing newline to avoid auto-running when pasting - if (textContent.endsWith("\n")) { - textContent = textContent.slice(0, -1) - } - return textContent -} diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/badge_only.css b/docs/$READTHEDOCS_OUTPUT/html/_static/css/badge_only.css deleted file mode 100644 index c718cee..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/css/badge_only.css +++ /dev/null @@ -1 +0,0 @@ -.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions 
a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb6000..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e23..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff deleted file mode 100644 index f815f63..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.eot b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.svg b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.svg deleted 
file mode 100644 index 855c845..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ - - - - -Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 - By ,,, -Copyright Dave Gandy 2016. All rights reserved. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.ttf b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff deleted file mode 100644 index 400014a..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc6..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff deleted file mode 100644 index 88ad05b..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff2 deleted file mode 100644 index c4e3d80..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold-italic.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff deleted file mode 100644 index c6dff51..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff and 
/dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff2 deleted file mode 100644 index bb19504..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-bold.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff deleted file mode 100644 index 76114bc..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff deleted file mode 100644 index ae1307f..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff2 b/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf9843..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/css/fonts/lato-normal.woff2 and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/css/theme.css b/docs/$READTHEDOCS_OUTPUT/html/_static/css/theme.css deleted file mode 100644 index 19a446a..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/css/theme.css +++ /dev/null @@ -1,4 +0,0 @@ -html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier 
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a 
button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! - * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content 
.code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes 
fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown 
[... remainder of the minified sphinx_rtd_theme stylesheet (Font Awesome icon rules plus the wy-* / rst-content theme styles for buttons, forms, alerts, dropdowns, and tables), removed along with the rest of the committed Read the Docs HTML build output ...]
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/doctools.js b/docs/$READTHEDOCS_OUTPUT/html/_static/doctools.js deleted file mode 100644 index d06a71d..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/doctools.js +++ /dev/null @@ -1,156 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Base JavaScript utilities for all Sphinx HTML documentation. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ - "TEXTAREA", - "INPUT", - "SELECT", - "BUTTON", -]); - -const _ready = (callback) => { - if (document.readyState !== "loading") { - callback(); - } else { - document.addEventListener("DOMContentLoaded", callback); - } -}; - -/** - * Small JavaScript module for the documentation. 
- */ -const Documentation = { - init: () => { - Documentation.initDomainIndexTable(); - Documentation.initOnKeyListeners(); - }, - - /** - * i18n support - */ - TRANSLATIONS: {}, - PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), - LOCALE: "unknown", - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext: (string) => { - const translated = Documentation.TRANSLATIONS[string]; - switch (typeof translated) { - case "undefined": - return string; // no translation - case "string": - return translated; // translation exists - default: - return translated[0]; // (singular, plural) translation tuple exists - } - }, - - ngettext: (singular, plural, n) => { - const translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated !== "undefined") - return translated[Documentation.PLURAL_EXPR(n)]; - return n === 1 ? singular : plural; - }, - - addTranslations: (catalog) => { - Object.assign(Documentation.TRANSLATIONS, catalog.messages); - Documentation.PLURAL_EXPR = new Function( - "n", - `return (${catalog.plural_expr})` - ); - Documentation.LOCALE = catalog.locale; - }, - - /** - * helper function to focus on search bar - */ - focusSearchBar: () => { - document.querySelectorAll("input[name=q]")[0]?.focus(); - }, - - /** - * Initialise the domain index toggle buttons - */ - initDomainIndexTable: () => { - const toggler = (el) => { - const idNumber = el.id.substr(7); - const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); - if (el.src.substr(-9) === "minus.png") { - el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; - toggledRows.forEach((el) => (el.style.display = "none")); - } else { - el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; - toggledRows.forEach((el) => (el.style.display = "")); - } - }; - - const togglerElements = document.querySelectorAll("img.toggler"); - togglerElements.forEach((el) => - el.addEventListener("click", (event) => toggler(event.currentTarget)) - ); - togglerElements.forEach((el) => (el.style.display = "")); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); - }, - - initOnKeyListeners: () => { - // only install a listener if it is really needed - if ( - !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && - !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS - ) - return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.altKey || event.ctrlKey || event.metaKey) return; - - if (!event.shiftKey) { - switch (event.key) { - case "ArrowLeft": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const prevLink = document.querySelector('link[rel="prev"]'); - if (prevLink && prevLink.href) { - window.location.href = prevLink.href; - event.preventDefault(); - } - break; - case "ArrowRight": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const nextLink = document.querySelector('link[rel="next"]'); - if (nextLink && nextLink.href) { - window.location.href = nextLink.href; - event.preventDefault(); - } - break; - } - } - - // some keyboard layouts may need Shift to get / - switch (event.key) { - case "/": - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; - Documentation.focusSearchBar(); - event.preventDefault(); - } - }); - }, -}; - -// quick alias for translations -const _ = Documentation.gettext; - -_ready(Documentation.init); diff --git 
a/docs/$READTHEDOCS_OUTPUT/html/_static/documentation_options.js b/docs/$READTHEDOCS_OUTPUT/html/_static/documentation_options.js deleted file mode 100644 index 178e44a..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/documentation_options.js +++ /dev/null @@ -1,14 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '06.05.2024', - LANGUAGE: 'en', - COLLAPSE_INDEX: false, - BUILDER: 'html', - FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, - SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: true, -}; \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/file.png b/docs/$READTHEDOCS_OUTPUT/html/_static/file.png deleted file mode 100644 index a858a41..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/file.png and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/jquery.js b/docs/$READTHEDOCS_OUTPUT/html/_static/jquery.js deleted file mode 100644 index c4c6022..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/jquery.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" 
":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return 
a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return 
se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/js/html5shiv.min.js b/docs/$READTHEDOCS_OUTPUT/html/_static/js/html5shiv.min.js deleted file mode 100644 index cd1c674..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/js/html5shiv.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/** -* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed -*/ -!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary 
template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/js/theme.js b/docs/$READTHEDOCS_OUTPUT/html/_static/js/theme.js deleted file mode 100644 index 1fddb6e..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/js/theme.js +++ /dev/null @@ -1 +0,0 @@ -!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" 
+ v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 - re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if (re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/minus.png b/docs/$READTHEDOCS_OUTPUT/html/_static/minus.png deleted file mode 100644 index d96755f..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/minus.png and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-broken-thumbnail.svg b/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-broken-thumbnail.svg deleted file mode 100644 index 4919ca8..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-broken-thumbnail.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-code-cells.css b/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-code-cells.css deleted file mode 100644 index a3fb27c..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-code-cells.css +++ /dev/null @@ -1,259 +0,0 @@ -/* remove conflicting styling from Sphinx themes 
*/ -div.nbinput.container div.prompt *, -div.nboutput.container div.prompt *, -div.nbinput.container div.input_area pre, -div.nboutput.container div.output_area pre, -div.nbinput.container div.input_area .highlight, -div.nboutput.container div.output_area .highlight { - border: none; - padding: 0; - margin: 0; - box-shadow: none; -} - -div.nbinput.container > div[class*=highlight], -div.nboutput.container > div[class*=highlight] { - margin: 0; -} - -div.nbinput.container div.prompt *, -div.nboutput.container div.prompt * { - background: none; -} - -div.nboutput.container div.output_area .highlight, -div.nboutput.container div.output_area pre { - background: unset; -} - -div.nboutput.container div.output_area div.highlight { - color: unset; /* override Pygments text color */ -} - -/* avoid gaps between output lines */ -div.nboutput.container div[class*=highlight] pre { - line-height: normal; -} - -/* input/output containers */ -div.nbinput.container, -div.nboutput.container { - display: -webkit-flex; - display: flex; - align-items: flex-start; - margin: 0; - width: 100%; -} -@media (max-width: 540px) { - div.nbinput.container, - div.nboutput.container { - flex-direction: column; - } -} - -/* input container */ -div.nbinput.container { - padding-top: 5px; -} - -/* last container */ -div.nblast.container { - padding-bottom: 5px; -} - -/* input prompt */ -div.nbinput.container div.prompt pre, -/* for sphinx_immaterial theme: */ -div.nbinput.container div.prompt pre > code { - color: #307FC1; -} - -/* output prompt */ -div.nboutput.container div.prompt pre, -/* for sphinx_immaterial theme: */ -div.nboutput.container div.prompt pre > code { - color: #BF5B3D; -} - -/* all prompts */ -div.nbinput.container div.prompt, -div.nboutput.container div.prompt { - width: 4.5ex; - padding-top: 5px; - position: relative; - user-select: none; -} - -div.nbinput.container div.prompt > div, -div.nboutput.container div.prompt > div { - position: absolute; - right: 0; - margin-right: 0.3ex; -} - -@media (max-width: 540px) { - div.nbinput.container div.prompt, - div.nboutput.container div.prompt { - width: unset; - text-align: left; - padding: 0.4em; - } - div.nboutput.container div.prompt.empty { - padding: 0; - } - - div.nbinput.container div.prompt > div, - div.nboutput.container div.prompt > div { - position: unset; - } -} - -/* disable scrollbars and line breaks on prompts */ -div.nbinput.container div.prompt pre, -div.nboutput.container div.prompt pre { - overflow: hidden; - white-space: pre; -} - -/* input/output area */ -div.nbinput.container div.input_area, -div.nboutput.container div.output_area { - -webkit-flex: 1; - flex: 1; - overflow: auto; -} -@media (max-width: 540px) { - div.nbinput.container div.input_area, - div.nboutput.container div.output_area { - width: 100%; - } -} - -/* input area */ -div.nbinput.container div.input_area { - border: 1px solid #e0e0e0; - border-radius: 2px; - /*background: #f5f5f5;*/ -} - -/* override MathJax center alignment in output cells */ -div.nboutput.container div[class*=MathJax] { - text-align: left !important; -} - -/* override sphinx.ext.imgmath center alignment in output cells */ -div.nboutput.container div.math p { - text-align: left; -} - -/* standard error */ -div.nboutput.container div.output_area.stderr { - background: #fdd; -} - -/* ANSI colors */ -.ansi-black-fg { color: #3E424D; } -.ansi-black-bg { background-color: #3E424D; } -.ansi-black-intense-fg { color: #282C36; } -.ansi-black-intense-bg { background-color: #282C36; } -.ansi-red-fg { color: 
#E75C58; } -.ansi-red-bg { background-color: #E75C58; } -.ansi-red-intense-fg { color: #B22B31; } -.ansi-red-intense-bg { background-color: #B22B31; } -.ansi-green-fg { color: #00A250; } -.ansi-green-bg { background-color: #00A250; } -.ansi-green-intense-fg { color: #007427; } -.ansi-green-intense-bg { background-color: #007427; } -.ansi-yellow-fg { color: #DDB62B; } -.ansi-yellow-bg { background-color: #DDB62B; } -.ansi-yellow-intense-fg { color: #B27D12; } -.ansi-yellow-intense-bg { background-color: #B27D12; } -.ansi-blue-fg { color: #208FFB; } -.ansi-blue-bg { background-color: #208FFB; } -.ansi-blue-intense-fg { color: #0065CA; } -.ansi-blue-intense-bg { background-color: #0065CA; } -.ansi-magenta-fg { color: #D160C4; } -.ansi-magenta-bg { background-color: #D160C4; } -.ansi-magenta-intense-fg { color: #A03196; } -.ansi-magenta-intense-bg { background-color: #A03196; } -.ansi-cyan-fg { color: #60C6C8; } -.ansi-cyan-bg { background-color: #60C6C8; } -.ansi-cyan-intense-fg { color: #258F8F; } -.ansi-cyan-intense-bg { background-color: #258F8F; } -.ansi-white-fg { color: #C5C1B4; } -.ansi-white-bg { background-color: #C5C1B4; } -.ansi-white-intense-fg { color: #A1A6B2; } -.ansi-white-intense-bg { background-color: #A1A6B2; } - -.ansi-default-inverse-fg { color: #FFFFFF; } -.ansi-default-inverse-bg { background-color: #000000; } - -.ansi-bold { font-weight: bold; } -.ansi-underline { text-decoration: underline; } - - -div.nbinput.container div.input_area div[class*=highlight] > pre, -div.nboutput.container div.output_area div[class*=highlight] > pre, -div.nboutput.container div.output_area div[class*=highlight].math, -div.nboutput.container div.output_area.rendered_html, -div.nboutput.container div.output_area > div.output_javascript, -div.nboutput.container div.output_area:not(.rendered_html) > img{ - padding: 5px; - margin: 0; -} - -/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ -div.nbinput.container div.input_area > div[class^='highlight'], -div.nboutput.container div.output_area > div[class^='highlight']{ - overflow-y: hidden; -} - -/* hide copy button on prompts for 'sphinx_copybutton' extension ... */ -.prompt .copybtn, -/* ... 
and 'sphinx_immaterial' theme */ -.prompt .md-clipboard.md-icon { - display: none; -} - -/* Some additional styling taken form the Jupyter notebook CSS */ -.jp-RenderedHTMLCommon table, -div.rendered_html table { - border: none; - border-collapse: collapse; - border-spacing: 0; - color: black; - font-size: 12px; - table-layout: fixed; -} -.jp-RenderedHTMLCommon thead, -div.rendered_html thead { - border-bottom: 1px solid black; - vertical-align: bottom; -} -.jp-RenderedHTMLCommon tr, -.jp-RenderedHTMLCommon th, -.jp-RenderedHTMLCommon td, -div.rendered_html tr, -div.rendered_html th, -div.rendered_html td { - text-align: right; - vertical-align: middle; - padding: 0.5em 0.5em; - line-height: normal; - white-space: normal; - max-width: none; - border: none; -} -.jp-RenderedHTMLCommon th, -div.rendered_html th { - font-weight: bold; -} -.jp-RenderedHTMLCommon tbody tr:nth-child(odd), -div.rendered_html tbody tr:nth-child(odd) { - background: #f5f5f5; -} -.jp-RenderedHTMLCommon tbody tr:hover, -div.rendered_html tbody tr:hover { - background: rgba(66, 165, 245, 0.2); -} - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-gallery.css b/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-gallery.css deleted file mode 100644 index 365c27a..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-gallery.css +++ /dev/null @@ -1,31 +0,0 @@ -.nbsphinx-gallery { - display: grid; - grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); - gap: 5px; - margin-top: 1em; - margin-bottom: 1em; -} - -.nbsphinx-gallery > a { - padding: 5px; - border: 1px dotted currentColor; - border-radius: 2px; - text-align: center; -} - -.nbsphinx-gallery > a:hover { - border-style: solid; -} - -.nbsphinx-gallery img { - max-width: 100%; - max-height: 100%; -} - -.nbsphinx-gallery > a > div:first-child { - display: flex; - align-items: start; - justify-content: center; - height: 120px; - margin-bottom: 5px; -} diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-no-thumbnail.svg b/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-no-thumbnail.svg deleted file mode 100644 index 9dca758..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/nbsphinx-no-thumbnail.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/plus.png b/docs/$READTHEDOCS_OUTPUT/html/_static/plus.png deleted file mode 100644 index 7107cec..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/_static/plus.png and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/pygments.css b/docs/$READTHEDOCS_OUTPUT/html/_static/pygments.css deleted file mode 100644 index 84ab303..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/pygments.css +++ /dev/null @@ -1,75 +0,0 @@ -pre { line-height: 125%; } -td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -.highlight .hll { background-color: #ffffcc } -.highlight { background: #f8f8f8; } -.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ -.highlight .err { border: 1px solid #FF0000 } /* Error */ -.highlight .k { color: #008000; font-weight: bold } /* Keyword */ -.highlight .o { color: #666666 } /* Operator */ -.highlight .ch { color: #3D7B7B; font-style: 
italic } /* Comment.Hashbang */ -.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ -.highlight .cp { color: #9C6500 } /* Comment.Preproc */ -.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ -.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ -.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ -.highlight .gd { color: #A00000 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ -.highlight .gr { color: #E40000 } /* Generic.Error */ -.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ -.highlight .gi { color: #008400 } /* Generic.Inserted */ -.highlight .go { color: #717171 } /* Generic.Output */ -.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ -.highlight .gt { color: #0044DD } /* Generic.Traceback */ -.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ -.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ -.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ -.highlight .kp { color: #008000 } /* Keyword.Pseudo */ -.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ -.highlight .kt { color: #B00040 } /* Keyword.Type */ -.highlight .m { color: #666666 } /* Literal.Number */ -.highlight .s { color: #BA2121 } /* Literal.String */ -.highlight .na { color: #687822 } /* Name.Attribute */ -.highlight .nb { color: #008000 } /* Name.Builtin */ -.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ -.highlight .no { color: #880000 } /* Name.Constant */ -.highlight .nd { color: #AA22FF } /* Name.Decorator */ -.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ -.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ -.highlight .nf { color: #0000FF } /* Name.Function */ -.highlight .nl { color: #767600 } /* Name.Label */ -.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ -.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ -.highlight .nv { color: #19177C } /* Name.Variable */ -.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ -.highlight .w { color: #bbbbbb } /* Text.Whitespace */ -.highlight .mb { color: #666666 } /* Literal.Number.Bin */ -.highlight .mf { color: #666666 } /* Literal.Number.Float */ -.highlight .mh { color: #666666 } /* Literal.Number.Hex */ -.highlight .mi { color: #666666 } /* Literal.Number.Integer */ -.highlight .mo { color: #666666 } /* Literal.Number.Oct */ -.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ -.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ -.highlight .sc { color: #BA2121 } /* Literal.String.Char */ -.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ -.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ -.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ -.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ -.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ -.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ -.highlight .sx { color: #008000 } /* Literal.String.Other */ -.highlight .sr { color: #A45A77 
} /* Literal.String.Regex */ -.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ -.highlight .ss { color: #19177C } /* Literal.String.Symbol */ -.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #0000FF } /* Name.Function.Magic */ -.highlight .vc { color: #19177C } /* Name.Variable.Class */ -.highlight .vg { color: #19177C } /* Name.Variable.Global */ -.highlight .vi { color: #19177C } /* Name.Variable.Instance */ -.highlight .vm { color: #19177C } /* Name.Variable.Magic */ -.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/searchtools.js b/docs/$READTHEDOCS_OUTPUT/html/_static/searchtools.js deleted file mode 100644 index 97d56a7..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/searchtools.js +++ /dev/null @@ -1,566 +0,0 @@ -/* - * searchtools.js - * ~~~~~~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for the full-text search. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -/** - * Simple result scoring code. - */ -if (typeof Scorer === "undefined") { - var Scorer = { - // Implement the following function to further tweak the score for each result - // The function takes a result array [docname, title, anchor, descr, score, filename] - // and returns the new score. - /* - score: result => { - const [docname, title, anchor, descr, score, filename] = result - return score - }, - */ - - // query matches the full name of an object - objNameMatch: 11, - // or matches in the last dotted part of the object name - objPartialMatch: 6, - // Additive scores depending on the priority of the object - objPrio: { - 0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5, // used to be unimportantResults - }, - // Used when the priority is not in the mapping. 
- objPrioDefault: 0, - - // query found in title - title: 15, - partialTitle: 7, - // query found in terms - term: 5, - partialTerm: 2, - }; -} - -const _removeChildren = (element) => { - while (element && element.lastChild) element.removeChild(element.lastChild); -}; - -/** - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping - */ -const _escapeRegExp = (string) => - string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string - -const _displayItem = (item, searchTerms) => { - const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; - const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; - const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; - const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; - const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; - - const [docName, title, anchor, descr, score, _filename] = item; - - let listItem = document.createElement("li"); - let requestUrl; - let linkUrl; - if (docBuilder === "dirhtml") { - // dirhtml builder - let dirname = docName + "/"; - if (dirname.match(/\/index\/$/)) - dirname = dirname.substring(0, dirname.length - 6); - else if (dirname === "index/") dirname = ""; - requestUrl = docUrlRoot + dirname; - linkUrl = requestUrl; - } else { - // normal html builders - requestUrl = docUrlRoot + docName + docFileSuffix; - linkUrl = docName + docLinkSuffix; - } - let linkEl = listItem.appendChild(document.createElement("a")); - linkEl.href = linkUrl + anchor; - linkEl.dataset.score = score; - linkEl.innerHTML = title; - if (descr) - listItem.appendChild(document.createElement("span")).innerHTML = - " (" + descr + ")"; - else if (showSearchSummary) - fetch(requestUrl) - .then((responseData) => responseData.text()) - .then((data) => { - if (data) - listItem.appendChild( - Search.makeSearchSummary(data, searchTerms) - ); - }); - Search.output.appendChild(listItem); -}; -const _finishSearch = (resultCount) => { - Search.stopPulse(); - Search.title.innerText = _("Search Results"); - if (!resultCount) - Search.status.innerText = Documentation.gettext( - "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." - ); - else - Search.status.innerText = _( - `Search finished, found ${resultCount} page(s) matching the search query.` - ); -}; -const _displayNextItem = ( - results, - resultCount, - searchTerms -) => { - // results left, load the summary and display it - // this is intended to be dynamic (don't sub resultsCount) - if (results.length) { - _displayItem(results.pop(), searchTerms); - setTimeout( - () => _displayNextItem(results, resultCount, searchTerms), - 5 - ); - } - // search finished, update title and status message - else _finishSearch(resultCount); -}; - -/** - * Default splitQuery function. Can be overridden in ``sphinx.search`` with a - * custom function per language. - * - * The regular expression works by splitting the string on consecutive characters - * that are not Unicode letters, numbers, underscores, or emoji characters. - * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
- */ -if (typeof splitQuery === "undefined") { - var splitQuery = (query) => query - .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) - .filter(term => term) // remove remaining empty strings -} - -/** - * Search Module - */ -const Search = { - _index: null, - _queued_query: null, - _pulse_status: -1, - - htmlToText: (htmlString) => { - const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); - const docContent = htmlElement.querySelector('[role="main"]'); - if (docContent !== undefined) return docContent.textContent; - console.warn( - "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." - ); - return ""; - }, - - init: () => { - const query = new URLSearchParams(window.location.search).get("q"); - document - .querySelectorAll('input[name="q"]') - .forEach((el) => (el.value = query)); - if (query) Search.performSearch(query); - }, - - loadIndex: (url) => - (document.body.appendChild(document.createElement("script")).src = url), - - setIndex: (index) => { - Search._index = index; - if (Search._queued_query !== null) { - const query = Search._queued_query; - Search._queued_query = null; - Search.query(query); - } - }, - - hasIndex: () => Search._index !== null, - - deferQuery: (query) => (Search._queued_query = query), - - stopPulse: () => (Search._pulse_status = -1), - - startPulse: () => { - if (Search._pulse_status >= 0) return; - - const pulse = () => { - Search._pulse_status = (Search._pulse_status + 1) % 4; - Search.dots.innerText = ".".repeat(Search._pulse_status); - if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); - }; - pulse(); - }, - - /** - * perform a search for something (or wait until index is loaded) - */ - performSearch: (query) => { - // create the required interface elements - const searchText = document.createElement("h2"); - searchText.textContent = _("Searching"); - const searchSummary = document.createElement("p"); - searchSummary.classList.add("search-summary"); - searchSummary.innerText = ""; - const searchList = document.createElement("ul"); - searchList.classList.add("search"); - - const out = document.getElementById("search-results"); - Search.title = out.appendChild(searchText); - Search.dots = Search.title.appendChild(document.createElement("span")); - Search.status = out.appendChild(searchSummary); - Search.output = out.appendChild(searchList); - - const searchProgress = document.getElementById("search-progress"); - // Some themes don't use the search progress node - if (searchProgress) { - searchProgress.innerText = _("Preparing search..."); - } - Search.startPulse(); - - // index already loaded, the browser was quick! 
- if (Search.hasIndex()) Search.query(query); - else Search.deferQuery(query); - }, - - /** - * execute search (requires search index to be loaded) - */ - query: (query) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - const allTitles = Search._index.alltitles; - const indexEntries = Search._index.indexentries; - - // stem the search terms and add them to the correct list - const stemmer = new Stemmer(); - const searchTerms = new Set(); - const excludedTerms = new Set(); - const highlightTerms = new Set(); - const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); - splitQuery(query.trim()).forEach((queryTerm) => { - const queryTermLower = queryTerm.toLowerCase(); - - // maybe skip this "word" - // stopwords array is from language_data.js - if ( - stopwords.indexOf(queryTermLower) !== -1 || - queryTerm.match(/^\d+$/) - ) - return; - - // stem the word - let word = stemmer.stemWord(queryTermLower); - // select the correct list - if (word[0] === "-") excludedTerms.add(word.substr(1)); - else { - searchTerms.add(word); - highlightTerms.add(queryTermLower); - } - }); - - if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js - localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) - } - - // console.debug("SEARCH: searching for:"); - // console.info("required: ", [...searchTerms]); - // console.info("excluded: ", [...excludedTerms]); - - // array of [docname, title, anchor, descr, score, filename] - let results = []; - _removeChildren(document.getElementById("search-progress")); - - const queryLower = query.toLowerCase(); - for (const [title, foundTitles] of Object.entries(allTitles)) { - if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { - for (const [file, id] of foundTitles) { - let score = Math.round(100 * queryLower.length / title.length) - results.push([ - docNames[file], - titles[file] !== title ? `${titles[file]} > ${title}` : title, - id !== null ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // search for explicit entries in index directives - for (const [entry, foundEntries] of Object.entries(indexEntries)) { - if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { - for (const [file, id] of foundEntries) { - let score = Math.round(100 * queryLower.length / entry.length) - results.push([ - docNames[file], - titles[file], - id ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // lookup as object - objectTerms.forEach((term) => - results.push(...Search.performObjectSearch(term, objectTerms)) - ); - - // lookup as search terms in fulltext - results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); - - // let the scorer override scores with a custom scoring function - if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); - - // now sort the results by score (in opposite order of appearance, since the - // display function below uses pop() to retrieve items) and then - // alphabetically - results.sort((a, b) => { - const leftScore = a[4]; - const rightScore = b[4]; - if (leftScore === rightScore) { - // same score: sort alphabetically - const leftTitle = a[1].toLowerCase(); - const rightTitle = b[1].toLowerCase(); - if (leftTitle === rightTitle) return 0; - return leftTitle > rightTitle ? -1 : 1; // inverted is intentional - } - return leftScore > rightScore ? 
1 : -1; - }); - - // remove duplicate search results - // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept - let seen = new Set(); - results = results.reverse().reduce((acc, result) => { - let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); - if (!seen.has(resultStr)) { - acc.push(result); - seen.add(resultStr); - } - return acc; - }, []); - - results = results.reverse(); - - // for debugging - //Search.lastresults = results.slice(); // a copy - // console.info("search results:", Search.lastresults); - - // print the results - _displayNextItem(results, results.length, searchTerms); - }, - - /** - * search for object names - */ - performObjectSearch: (object, objectTerms) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const objects = Search._index.objects; - const objNames = Search._index.objnames; - const titles = Search._index.titles; - - const results = []; - - const objectSearchCallback = (prefix, match) => { - const name = match[4] - const fullname = (prefix ? prefix + "." : "") + name; - const fullnameLower = fullname.toLowerCase(); - if (fullnameLower.indexOf(object) < 0) return; - - let score = 0; - const parts = fullnameLower.split("."); - - // check for different match types: exact matches of full name or - // "last name" (i.e. last dotted part) - if (fullnameLower === object || parts.slice(-1)[0] === object) - score += Scorer.objNameMatch; - else if (parts.slice(-1)[0].indexOf(object) > -1) - score += Scorer.objPartialMatch; // matches in last name - - const objName = objNames[match[1]][2]; - const title = titles[match[0]]; - - // If more than one term searched for, we require other words to be - // found in the name/title/description - const otherTerms = new Set(objectTerms); - otherTerms.delete(object); - if (otherTerms.size > 0) { - const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); - if ( - [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) - ) - return; - } - - let anchor = match[3]; - if (anchor === "") anchor = fullname; - else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; - - const descr = objName + _(", in ") + title; - - // add custom score for some objects according to scorer - if (Scorer.objPrio.hasOwnProperty(match[2])) - score += Scorer.objPrio[match[2]]; - else score += Scorer.objPrioDefault; - - results.push([ - docNames[match[0]], - fullname, - "#" + anchor, - descr, - score, - filenames[match[0]], - ]); - }; - Object.keys(objects).forEach((prefix) => - objects[prefix].forEach((array) => - objectSearchCallback(prefix, array) - ) - ); - return results; - }, - - /** - * search for full-text terms in the index - */ - performTermsSearch: (searchTerms, excludedTerms) => { - // prepare search - const terms = Search._index.terms; - const titleTerms = Search._index.titleterms; - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - - const scoreMap = new Map(); - const fileMap = new Map(); - - // perform the search on the required terms - searchTerms.forEach((word) => { - const files = []; - const arr = [ - { files: terms[word], score: Scorer.term }, - { files: titleTerms[word], score: Scorer.title }, - ]; - // add support for partial matches - if (word.length > 2) { - const escapedWord = _escapeRegExp(word); - Object.keys(terms).forEach((term) => { - if (term.match(escapedWord) && !terms[word]) - arr.push({ 
files: terms[term], score: Scorer.partialTerm }); - }); - Object.keys(titleTerms).forEach((term) => { - if (term.match(escapedWord) && !titleTerms[word]) - arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); - }); - } - - // no match but word was a required one - if (arr.every((record) => record.files === undefined)) return; - - // found search word in contents - arr.forEach((record) => { - if (record.files === undefined) return; - - let recordFiles = record.files; - if (recordFiles.length === undefined) recordFiles = [recordFiles]; - files.push(...recordFiles); - - // set score for the word in each file - recordFiles.forEach((file) => { - if (!scoreMap.has(file)) scoreMap.set(file, {}); - scoreMap.get(file)[word] = record.score; - }); - }); - - // create the mapping - files.forEach((file) => { - if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) - fileMap.get(file).push(word); - else fileMap.set(file, [word]); - }); - }); - - // now check if the files don't contain excluded terms - const results = []; - for (const [file, wordList] of fileMap) { - // check if all requirements are matched - - // as search terms with length < 3 are discarded - const filteredTermCount = [...searchTerms].filter( - (term) => term.length > 2 - ).length; - if ( - wordList.length !== searchTerms.size && - wordList.length !== filteredTermCount - ) - continue; - - // ensure that none of the excluded terms is in the search result - if ( - [...excludedTerms].some( - (term) => - terms[term] === file || - titleTerms[term] === file || - (terms[term] || []).includes(file) || - (titleTerms[term] || []).includes(file) - ) - ) - break; - - // select one (max) score for the file. - const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); - // add result to the result list - results.push([ - docNames[file], - titles[file], - "", - null, - score, - filenames[file], - ]); - } - return results; - }, - - /** - * helper function to return a node containing the - * search summary for a given text. keywords is a list - * of stemmed words. - */ - makeSearchSummary: (htmlText, keywords) => { - const text = Search.htmlToText(htmlText); - if (text === "") return null; - - const textLower = text.toLowerCase(); - const actualStartPosition = [...keywords] - .map((k) => textLower.indexOf(k.toLowerCase())) - .filter((i) => i > -1) - .slice(-1)[0]; - const startWithContext = Math.max(actualStartPosition - 120, 0); - - const top = startWithContext === 0 ? "" : "..."; - const tail = startWithContext + 240 < text.length ? "..." : ""; - - let summary = document.createElement("p"); - summary.classList.add("context"); - summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; - - return summary; - }, -}; - -_ready(Search.init); diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/sphinx_highlight.js b/docs/$READTHEDOCS_OUTPUT/html/_static/sphinx_highlight.js deleted file mode 100644 index aae669d..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/sphinx_highlight.js +++ /dev/null @@ -1,144 +0,0 @@ -/* Highlighting utilities for Sphinx HTML documentation. */ -"use strict"; - -const SPHINX_HIGHLIGHT_ENABLED = true - -/** - * highlight a given string on a node by wrapping it in - * span elements with the given class name. 
- */ -const _highlight = (node, addItems, text, className) => { - if (node.nodeType === Node.TEXT_NODE) { - const val = node.nodeValue; - const parent = node.parentNode; - const pos = val.toLowerCase().indexOf(text); - if ( - pos >= 0 && - !parent.classList.contains(className) && - !parent.classList.contains("nohighlight") - ) { - let span; - - const closestNode = parent.closest("body, svg, foreignObject"); - const isInSVG = closestNode && closestNode.matches("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.classList.add(className); - } - - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - parent.insertBefore( - span, - parent.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling - ) - ); - node.nodeValue = val.substr(0, pos); - - if (isInSVG) { - const rect = document.createElementNS( - "http://www.w3.org/2000/svg", - "rect" - ); - const bbox = parent.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute("class", className); - addItems.push({ parent: parent, target: rect }); - } - } - } else if (node.matches && !node.matches("button, select, textarea")) { - node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); - } -}; -const _highlightText = (thisNode, text, className) => { - let addItems = []; - _highlight(thisNode, addItems, text, className); - addItems.forEach((obj) => - obj.parent.insertAdjacentElement("beforebegin", obj.target) - ); -}; - -/** - * Small JavaScript module for the documentation. - */ -const SphinxHighlight = { - - /** - * highlight the search words provided in localstorage in the text - */ - highlightSearchWords: () => { - if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight - - // get and clear terms from localstorage - const url = new URL(window.location); - const highlight = - localStorage.getItem("sphinx_highlight_terms") - || url.searchParams.get("highlight") - || ""; - localStorage.removeItem("sphinx_highlight_terms") - url.searchParams.delete("highlight"); - window.history.replaceState({}, "", url); - - // get individual terms from highlight string - const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); - if (terms.length === 0) return; // nothing to do - - // There should never be more than one element matching "div.body" - const divBody = document.querySelectorAll("div.body"); - const body = divBody.length ? 
divBody[0] : document.querySelector("body"); - window.setTimeout(() => { - terms.forEach((term) => _highlightText(body, term, "highlighted")); - }, 10); - - const searchBox = document.getElementById("searchbox"); - if (searchBox === null) return; - searchBox.appendChild( - document - .createRange() - .createContextualFragment( - '" - ) - ); - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords: () => { - document - .querySelectorAll("#searchbox .highlight-link") - .forEach((el) => el.remove()); - document - .querySelectorAll("span.highlighted") - .forEach((el) => el.classList.remove("highlighted")); - localStorage.removeItem("sphinx_highlight_terms") - }, - - initEscapeListener: () => { - // only install a listener if it is really needed - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; - if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { - SphinxHighlight.hideSearchWords(); - event.preventDefault(); - } - }); - }, -}; - -_ready(SphinxHighlight.highlightSearchWords); -_ready(SphinxHighlight.initEscapeListener); diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.css b/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.css deleted file mode 100644 index 54a6787..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.css +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Admonition-based toggles - */ - -/* Visibility of the target */ -.admonition.toggle .admonition-title ~ * { - transition: opacity .3s, height .3s; -} - -/* Toggle buttons inside admonitions so we see the title */ -.admonition.toggle { - position: relative; -} - -/* Titles should cut off earlier to avoid overlapping w/ button */ -.admonition.toggle .admonition-title { - padding-right: 25%; - cursor: pointer; -} - -/* Hovering will cause a slight shift in color to make it feel interactive */ -.admonition.toggle .admonition-title:hover { - box-shadow: inset 0 0 0px 20px rgb(0 0 0 / 1%); -} - -/* Hovering will cause a slight shift in color to make it feel interactive */ -.admonition.toggle .admonition-title:active { - box-shadow: inset 0 0 0px 20px rgb(0 0 0 / 3%); -} - -/* Remove extra whitespace below the admonition title when hidden */ -.admonition.toggle-hidden { - padding-bottom: 0; -} - -.admonition.toggle-hidden .admonition-title { - margin-bottom: 0; -} - -/* hides all the content of a page until de-toggled */ -.admonition.toggle-hidden .admonition-title ~ * { - height: 0; - margin: 0; - opacity: 0; - visibility: hidden; -} - -/* General button style and position*/ -button.toggle-button { - /** - * Background and shape. 
By default there's no background - * but users can style as they wish - */ - background: none; - border: none; - outline: none; - - /* Positioning just inside the admonition title */ - position: absolute; - right: 0.5em; - padding: 0px; - border: none; - outline: none; -} - -/* Display the toggle hint on wide screens */ -@media (min-width: 768px) { - button.toggle-button.toggle-button-hidden:before { - content: attr(data-toggle-hint); /* This will be filled in by JS */ - font-size: .8em; - align-self: center; - } -} - -/* Icon behavior */ -.tb-icon { - transition: transform .2s ease-out; - height: 1.5em; - width: 1.5em; - stroke: currentColor; /* So that we inherit the color of other text */ -} - -/* The icon should point right when closed, down when open. */ -/* Open */ -.admonition.toggle button .tb-icon { - transform: rotate(90deg); -} - -/* Closed */ -.admonition.toggle button.toggle-button-hidden .tb-icon { - transform: rotate(0deg); -} - -/* With details toggles, we don't rotate the icon so it points right */ -details.toggle-details .tb-icon { - height: 1.4em; - width: 1.4em; - margin-top: 0.1em; /* To center the button vertically */ -} - - -/** - * Details-based toggles. - * In this case, we wrap elements with `.toggle` in a details block. - */ - -/* Details blocks */ -details.toggle-details { - margin: 1em 0; -} - - -details.toggle-details summary { - display: flex; - align-items: center; - cursor: pointer; - list-style: none; - border-radius: .2em; - border-left: 3px solid #1976d2; - background-color: rgb(204 204 204 / 10%); - padding: 0.2em 0.7em 0.3em 0.5em; /* Less padding on left because the SVG has left margin */ - font-size: 0.9em; -} - -details.toggle-details summary:hover { - background-color: rgb(204 204 204 / 20%); -} - -details.toggle-details summary:active { - background: rgb(204 204 204 / 28%); -} - -.toggle-details__summary-text { - margin-left: 0.2em; -} - -details.toggle-details[open] summary { - margin-bottom: .5em; -} - -details.toggle-details[open] summary .tb-icon { - transform: rotate(90deg); -} - -details.toggle-details[open] summary ~ * { - animation: toggle-fade-in .3s ease-out; -} - -@keyframes toggle-fade-in { - from {opacity: 0%;} - to {opacity: 100%;} -} - -/* Print rules - we hide all toggle button elements at print */ -@media print { - /* Always hide the summary so the button doesn't show up */ - details.toggle-details summary { - display: none; - } -} \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.js b/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.js deleted file mode 100644 index 215a7ee..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/_static/togglebutton.js +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Add Toggle Buttons to elements - */ - -let toggleChevron = ` - - - -`; - -var initToggleItems = () => { - var itemsToToggle = document.querySelectorAll(togglebuttonSelector); - console.log(`[togglebutton]: Adding toggle buttons to ${itemsToToggle.length} items`) - // Add the button to each admonition and hook up a callback to toggle visibility - itemsToToggle.forEach((item, index) => { - if (item.classList.contains("admonition")) { - // If it's an admonition block, then we'll add a button inside - // Generate unique IDs for this item - var toggleID = `toggle-${index}`; - var buttonID = `button-${toggleID}`; - - item.setAttribute('id', toggleID); - if (!item.classList.contains("toggle")){ - item.classList.add("toggle"); - } - // This is the button that will be added to each item to trigger the toggle - var 
collapseButton = ` - `; - - title = item.querySelector(".admonition-title") - title.insertAdjacentHTML("beforeend", collapseButton); - thisButton = document.getElementById(buttonID); - - // Add click handlers for the button + admonition title (if admonition) - admonitionTitle = document.querySelector(`#${toggleID} > .admonition-title`) - if (admonitionTitle) { - // If an admonition, then make the whole title block clickable - admonitionTitle.addEventListener('click', toggleClickHandler); - admonitionTitle.dataset.target = toggleID - admonitionTitle.dataset.button = buttonID - } else { - // If not an admonition then we'll listen for the button click - thisButton.addEventListener('click', toggleClickHandler); - } - - // Now hide the item for this toggle button unless explicitly noted to show - if (!item.classList.contains("toggle-shown")) { - toggleHidden(thisButton); - } - } else { - // If not an admonition, wrap the block in a
block - // Define the structure of the details block and insert it as a sibling - var detailsBlock = ` -
- - ${toggleChevron} - ${toggleHintShow} - -
`; - item.insertAdjacentHTML("beforebegin", detailsBlock); - - // Now move the toggle-able content inside of the details block - details = item.previousElementSibling - details.appendChild(item) - item.classList.add("toggle-details__container") - - // Set up a click trigger to change the text as needed - details.addEventListener('click', (click) => { - let parent = click.target.parentElement; - if (parent.tagName.toLowerCase() == "details") { - summary = parent.querySelector("summary"); - details = parent; - } else { - summary = parent; - details = parent.parentElement; - } - // Update the inner text for the proper hint - if (details.open) { - summary.querySelector("span.toggle-details__summary-text").innerText = toggleHintShow; - } else { - summary.querySelector("span.toggle-details__summary-text").innerText = toggleHintHide; - } - - }); - - // If we have a toggle-shown class, open details block should be open - if (item.classList.contains("toggle-shown")) { - details.click(); - } - } - }) -}; - -// This should simply add / remove the collapsed class and change the button text -var toggleHidden = (button) => { - target = button.dataset['target'] - var itemToToggle = document.getElementById(target); - if (itemToToggle.classList.contains("toggle-hidden")) { - itemToToggle.classList.remove("toggle-hidden"); - button.classList.remove("toggle-button-hidden"); - } else { - itemToToggle.classList.add("toggle-hidden"); - button.classList.add("toggle-button-hidden"); - } -} - -var toggleClickHandler = (click) => { - // Be cause the admonition title is clickable and extends to the whole admonition - // We only look for a click event on this title to trigger the toggle. - - if (click.target.classList.contains("admonition-title")) { - button = click.target.querySelector(".toggle-button"); - } else if (click.target.classList.contains("tb-icon")) { - // We've clicked the icon and need to search up one parent for the button - button = click.target.parentElement; - } else if (click.target.tagName == "polyline") { - // We've clicked the SVG elements inside the button, need to up 2 layers - button = click.target.parentElement.parentElement; - } else if (click.target.classList.contains("toggle-button")) { - // We've clicked the button itself and so don't need to do anything - button = click.target; - } else { - console.log(`[togglebutton]: Couldn't find button for ${click.target}`) - } - target = document.getElementById(button.dataset['button']); - toggleHidden(target); -} - -// If we want to blanket-add toggle classes to certain cells -var addToggleToSelector = () => { - const selector = ""; - if (selector.length > 0) { - document.querySelectorAll(selector).forEach((item) => { - item.classList.add("toggle"); - }) - } -} - -// Helper function to run when the DOM is finished -const sphinxToggleRunWhenDOMLoaded = cb => { - if (document.readyState != 'loading') { - cb() - } else if (document.addEventListener) { - document.addEventListener('DOMContentLoaded', cb) - } else { - document.attachEvent('onreadystatechange', function() { - if (document.readyState == 'complete') cb() - }) - } -} -sphinxToggleRunWhenDOMLoaded(addToggleToSelector) -sphinxToggleRunWhenDOMLoaded(initToggleItems) - -/** Toggle details blocks to be open when printing */ -if (toggleOpenOnPrint == "true") { - window.addEventListener("beforeprint", () => { - // Open the details - document.querySelectorAll("details.toggle-details").forEach((el) => { - el.dataset["togglestatus"] = el.open; - el.open = true; - }); - - // Open the admonitions - 
document.querySelectorAll(".admonition.toggle.toggle-hidden").forEach((el) => { - console.log(el); - el.querySelector("button.toggle-button").click(); - el.dataset["toggle_after_print"] = "true"; - }); - }); - window.addEventListener("afterprint", () => { - // Re-close the details that were closed - document.querySelectorAll("details.toggle-details").forEach((el) => { - el.open = el.dataset["togglestatus"] == "true"; - delete el.dataset["togglestatus"]; - }); - - // Re-close the admonition toggle buttons - document.querySelectorAll(".admonition.toggle").forEach((el) => { - if (el.dataset["toggle_after_print"] == "true") { - el.querySelector("button.toggle-button").click(); - delete el.dataset["toggle_after_print"]; - } - }); - }); -} diff --git a/docs/$READTHEDOCS_OUTPUT/html/api/base_models/BaseModels.html b/docs/$READTHEDOCS_OUTPUT/html/api/base_models/BaseModels.html deleted file mode 100644 index 054cc67..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/api/base_models/BaseModels.html +++ /dev/null @@ -1,3173 +0,0 @@ - - - - - - - Base Models — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Base Models
class mambular.base_models.classifier.BaseMambularClassifier(num_classes, config, cat_feature_info=None, num_feature_info=None, lr=0.001, lr_patience=10, weight_decay=0.025, lr_factor=0.75)[source]

A base class for building classification models using the Mambular architecture within the PyTorch Lightning framework.

This class integrates various components such as embeddings for categorical and numerical features, the Mambular model for processing sequences of embeddings, and a classification head for prediction. It supports multi-class and binary classification tasks.

Parameters:
  • num_classes (int) – The number of classes in the classification task. For binary classification, this should be 2.
  • config (MambularConfig) – An instance of MambularConfig containing configuration parameters for the Mambular model.
  • cat_feature_info (dict, optional) – A dictionary mapping the names of categorical features to their number of unique categories. This information is used to configure embedding layers for categorical features. Defaults to None.
  • num_feature_info (dict, optional) – A dictionary mapping the names of numerical features to the size of their input dimensions. This information is used to configure embedding layers for numerical features. Defaults to None.
  • lr (float, optional) – The learning rate for the optimizer. Defaults to 1e-03.
  • lr_patience (int, optional) – The number of epochs with no improvement after which the learning rate will be reduced. Defaults to 10.
  • weight_decay (float, optional) – Weight decay (L2 penalty) parameter for the optimizer. Defaults to 0.025.
  • lr_factor (float, optional) – Factor by which the learning rate will be reduced. Defaults to 0.75.
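For orientation, a minimal instantiation sketch in Python. The feature-info dictionaries below are hypothetical examples, and the sketch assumes that MambularConfig can be imported from mambular.utils.config and default-constructed; adjust the config to your own setting.

    from mambular.base_models.classifier import BaseMambularClassifier
    from mambular.utils.config import MambularConfig  # assumed import path

    # Hypothetical feature metadata: categorical columns -> number of categories,
    # numerical columns -> input dimension per feature.
    cat_feature_info = {"job": 12, "education": 4}
    num_feature_info = {"age": 1, "income": 1}

    config = MambularConfig()  # assumption: default-constructible; tune fields as needed

    model = BaseMambularClassifier(
        num_classes=2,                      # binary classification
        config=config,
        cat_feature_info=cat_feature_info,
        num_feature_info=num_feature_info,
        lr=1e-3,
        lr_patience=10,
        weight_decay=0.025,
        lr_factor=0.75,
    )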
embedding_activation (nn.Module) – The activation function to be applied after the linear transformation of numerical features.
num_embeddings (nn.ModuleList) – A list of sequential modules, each corresponding to an embedding layer for a numerical feature.
cat_embeddings (nn.ModuleList) – A list of embedding layers, each corresponding to a categorical feature.
mamba (Mamba) – The Mambular model for processing sequences of embeddings.
norm_f (nn.Module) – A normalization layer applied after the Mambular model.
tabular_head (nn.Linear) – A linear layer for predicting the class labels from the aggregated embedding representation.
pooling_method (str) – The method used to aggregate embeddings across features. Supported methods are ‘avg’, ‘max’, and ‘sum’.
loss_fct (nn.Module) – The loss function used for training the model, configured based on the number of classes.
acc (torchmetrics.Accuracy) – A metric for computing the accuracy of predictions.
auroc (torchmetrics.AUROC) – A metric for computing the Area Under the Receiver Operating Characteristic curve.
precision (torchmetrics.Precision) – A metric for computing the precision of predictions.

forward(cat_features, num_features)[source] – Defines the forward pass of the model, processing both categorical and numerical features, aggregating embeddings, and producing predictions.
training_step(batch, batch_idx)[source] – Performs a single training step, computing the loss and logging metrics for the training set.
validation_step(batch, batch_idx)[source] – Performs a single validation step, computing the loss and logging metrics for the validation set.
configure_optimizers()[source] – Configures the model’s optimizers and learning rate schedulers.
Attributes:
-
-
automatic_optimization

If set to False you are responsible for calling .backward(), .step(), .zero_grad().

-
-
current_epoch

The current epoch in the Trainer, or 0 if not attached.

-
-
device
-
dtype
-
example_input_array

The example input array is a specification of what the module can consume in the forward() method.

-
-
fabric
-
global_rank

The index of the current process across all nodes and devices.

-
-
global_step

Total training batches seen across all epochs.

-
-
hparams

The collection of hyperparameters saved with save_hyperparameters().

-
-
hparams_initial

The collection of hyperparameters saved with save_hyperparameters().

-
-
local_rank

The index of the current process within a single node.

-
-
logger

Reference to the logger object in the Trainer.

-
-
loggers

Reference to the list of loggers in the Trainer.

-
-
on_gpu

Returns True if this model is currently located on a GPU.

-
-
strict_loading

Determines how Lightning loads this model using .load_state_dict(…, strict=model.strict_loading).

-
-
trainer
-
-
-
-

Methods


add_module(name, module)

Add a child module to the current module.

all_gather(data[, group, sync_grads])

Gather tensors or collections of tensors from multiple processes.

apply(fn)

Apply fn recursively to every submodule (as returned by .children()) as well as self.

backward(loss, *args, **kwargs)

Called to perform backward on the loss returned in training_step().

bfloat16()

Casts all floating point parameters and buffers to bfloat16 datatype.

buffers([recurse])

Return an iterator over module buffers.

children()

Return an iterator over immediate children modules.

clip_gradients(optimizer[, ...])

Handles gradient clipping internally.

compile(*args, **kwargs)

Compile this Module's forward using torch.compile().

configure_callbacks()

Configure model-specific callbacks.

configure_gradient_clipping(optimizer[, ...])

Perform gradient clipping for the optimizer parameters.

configure_model()

Hook to create modules in a strategy and precision aware context.

configure_optimizers()

Sets up the model's optimizer and learning rate scheduler based on the configurations provided.

configure_sharded_model()

Deprecated.

cpu()

See torch.nn.Module.cpu().

cuda([device])

Moves all model parameters and buffers to the GPU.

double()

See torch.nn.Module.double().

eval()

Set the module in evaluation mode.

extra_repr()

Set the extra representation of the module.

float()

See torch.nn.Module.float().

forward(cat_features, num_features)

Defines the forward pass of the classifier.

freeze()

Freeze all params for inference.

get_buffer(target)

Return the buffer given by target if it exists, otherwise throw an error.

get_extra_state()

Return any extra state to include in the module's state_dict.

get_parameter(target)

Return the parameter given by target if it exists, otherwise throw an error.

get_submodule(target)

Return the submodule given by target if it exists, otherwise throw an error.

half()

See torch.nn.Module.half().

ipu([device])

Move all model parameters and buffers to the IPU.

load_from_checkpoint(checkpoint_path[, ...])

Primary way of loading a model from a checkpoint.

load_state_dict(state_dict[, strict, assign])

Copy parameters and buffers from state_dict into this module and its descendants.

log(name, value[, prog_bar, logger, ...])

Log a key, value pair.

log_dict(dictionary[, prog_bar, logger, ...])

Log a dictionary of values at once.

lr_scheduler_step(scheduler, metric)

Override this method to adjust the default way the Trainer calls each scheduler.

lr_schedulers()

Returns the learning rate scheduler(s) that are being used during training.

manual_backward(loss, *args, **kwargs)

Call this directly from your training_step() when doing optimizations manually.

modules()

Return an iterator over all modules in the network.

named_buffers([prefix, recurse, ...])

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

named_children()

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

named_modules([memo, prefix, remove_duplicate])

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

named_parameters([prefix, recurse, ...])

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

on_after_backward()

Called after loss.backward() and before optimizers are stepped.

on_after_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch after it is transferred to the device.

on_before_backward(loss)

Called before loss.backward().

on_before_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch before it is transferred to the device.

on_before_optimizer_step(optimizer)

Called before optimizer.step().

on_before_zero_grad(optimizer)

Called after training_step() and before optimizer.zero_grad().

on_fit_end()

Called at the very end of fit.

on_fit_start()

Called at the very beginning of fit.

on_load_checkpoint(checkpoint)

Called by Lightning to restore your model.

on_predict_batch_end(outputs, batch, batch_idx)

Called in the predict loop after the batch.

on_predict_batch_start(batch, batch_idx[, ...])

Called in the predict loop before anything happens for that batch.

on_predict_end()

Called at the end of predicting.

on_predict_epoch_end()

Called at the end of predicting.

on_predict_epoch_start()

Called at the beginning of predicting.

on_predict_model_eval()

Called when the predict loop starts.

on_predict_start()

Called at the beginning of predicting.

on_save_checkpoint(checkpoint)

Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want to save.

on_test_batch_end(outputs, batch, batch_idx)

Called in the test loop after the batch.

on_test_batch_start(batch, batch_idx[, ...])

Called in the test loop before anything happens for that batch.

on_test_end()

Called at the end of testing.

on_test_epoch_end()

Called in the test loop at the very end of the epoch.

on_test_epoch_start()

Called in the test loop at the very beginning of the epoch.

on_test_model_eval()

Called when the test loop starts.

on_test_model_train()

Called when the test loop ends.

on_test_start()

Called at the beginning of testing.

on_train_batch_end(outputs, batch, batch_idx)

Called in the training loop after the batch.

on_train_batch_start(batch, batch_idx)

Called in the training loop before anything happens for that batch.

on_train_end()

Called at the end of training before logger experiment is closed.

on_train_epoch_end()

Called in the training loop at the very end of the epoch.

on_train_epoch_start()

Called in the training loop at the very beginning of the epoch.

on_train_start()

Called at the beginning of training after sanity check.

on_validation_batch_end(outputs, batch, ...)

Called in the validation loop after the batch.

on_validation_batch_start(batch, batch_idx)

Called in the validation loop before anything happens for that batch.

on_validation_end()

Called at the end of validation.

on_validation_epoch_end()

Called in the validation loop at the very end of the epoch.

on_validation_epoch_start()

Called in the validation loop at the very beginning of the epoch.

on_validation_model_eval()

Called when the validation loop starts.

on_validation_model_train()

Called when the validation loop ends.

on_validation_model_zero_grad()

Called by the training loop to release gradients before entering the validation loop.

on_validation_start()

Called at the beginning of validation.

optimizer_step(epoch, batch_idx, optimizer)

Override this method to adjust the default way the Trainer calls the optimizer.

optimizer_zero_grad(epoch, batch_idx, optimizer)

Override this method to change the default behaviour of optimizer.zero_grad().

optimizers([use_pl_optimizer])

Returns the optimizer(s) that are being used during training.

parameters([recurse])

Return an iterator over module parameters.

predict_dataloader()

An iterable or collection of iterables specifying prediction samples.

predict_step(*args, **kwargs)

Step function called during predict().

prepare_data()

Use this to download and prepare data.

print(*args, **kwargs)

Prints only from process 0.

register_backward_hook(hook)

Register a backward hook on the module.

register_buffer(name, tensor[, persistent])

Add a buffer to the module.

register_forward_hook(hook, *[, prepend, ...])

Register a forward hook on the module.

register_forward_pre_hook(hook, *[, ...])

Register a forward pre-hook on the module.

register_full_backward_hook(hook[, prepend])

Register a backward hook on the module.

register_full_backward_pre_hook(hook[, prepend])

Register a backward pre-hook on the module.

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

register_module(name, module)

Alias for add_module().

register_parameter(name, param)

Add a parameter to the module.

register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

requires_grad_([requires_grad])

Change if autograd should record operations on parameters in this module.

save_hyperparameters(*args[, ignore, frame, ...])

Save arguments to hparams attribute.

set_extra_state(state)

Set extra state contained in the loaded state_dict.

setup(stage)

Called at the beginning of fit (train + validate), validate, test, or predict.

share_memory()

See torch.Tensor.share_memory_().

state_dict(*args[, destination, prefix, ...])

Return a dictionary containing references to the whole state of the module.

teardown(stage)

Called at the end of fit (train + validate), validate, test, or predict.

test_dataloader()

An iterable or collection of iterables specifying test samples.

test_step(*args, **kwargs)

Operates on a single batch of data from the test set.

to(*args, **kwargs)

See torch.nn.Module.to().

to_empty(*, device[, recurse])

Move the parameters and buffers to the specified device without copying storage.

to_onnx(file_path[, input_sample])

Saves the model in ONNX format.

to_torchscript([file_path, method, ...])

By default compiles the whole model to a ScriptModule.

toggle_optimizer(optimizer)

Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup.

train([mode])

Set the module in training mode.

train_dataloader()

An iterable or collection of iterables specifying training samples.

training_step(batch, batch_idx)

Processes a single batch during training, computes the loss and logs training metrics.

transfer_batch_to_device(batch, device, ...)

Override this hook if your DataLoader returns tensors wrapped in a custom data structure.

type(dst_type)

See torch.nn.Module.type().

unfreeze()

Unfreeze all parameters for training.

untoggle_optimizer(optimizer)

Resets the state of required gradients that were toggled with toggle_optimizer().

val_dataloader()

An iterable or collection of iterables specifying validation samples.

validation_step(batch, batch_idx)

Processes a single batch during validation, computes the loss and logs validation metrics.

xpu([device])

Move all model parameters and buffers to the XPU.

zero_grad([set_to_none])

Reset gradients of all model parameters.


__call__

-
-
configure_optimizers()[source]

Sets up the model’s optimizer and learning rate scheduler based on the configurations provided.

Returns: A dictionary containing the optimizer and lr_scheduler configurations.
Return type: dict
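The entry above only states the return contract: a dictionary carrying the optimizer and lr_scheduler configurations. As a point of reference, the snippet below sketches the kind of structure PyTorch Lightning accepts from such a hook, assuming an Adam optimizer and a ReduceLROnPlateau scheduler driven by the lr, weight_decay, lr_factor and lr_patience parameters; the actual implementation in mambular may differ.

    import torch
    import torch.nn as nn

    module = nn.Linear(8, 2)  # placeholder module standing in for the classifier
    optimizer = torch.optim.Adam(module.parameters(), lr=1e-3, weight_decay=0.025)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode="min", factor=0.75, patience=10
    )
    # Shape of the dictionary Lightning expects from configure_optimizers();
    # "monitor" names the logged metric that drives the plateau scheduler
    # (assumed here to be a validation loss).
    optim_config = {
        "optimizer": optimizer,
        "lr_scheduler": {"scheduler": scheduler, "monitor": "val_loss"},
    }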
forward(cat_features, num_features)[source]

Defines the forward pass of the classifier.

Parameters:
  • cat_features (Tensor) – Tensor containing the categorical features.
  • num_features (Tensor) – Tensor containing the numerical features.

Returns: The output predictions of the model.
Return type: Tensor
training_step(batch, batch_idx)[source]

Processes a single batch during training, computes the loss and logs training metrics.

Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
validation_step(batch, batch_idx)[source]

Processes a single batch during validation, computes the loss and logs validation metrics.

Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
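Because training_step and validation_step follow the standard PyTorch Lightning hooks, the classifier can be trained with a plain pl.Trainer. A minimal sketch, assuming model is the BaseMambularClassifier instance from the earlier example and that train_loader and val_loader are DataLoaders whose batches follow the layout documented above (numerical features, categorical features, labels):

    import pytorch_lightning as pl

    # Sketch only: `model`, `train_loader` and `val_loader` are assumed to exist
    # and to match the batch layout expected by training_step / validation_step.
    trainer = pl.Trainer(max_epochs=100, accelerator="auto")
    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)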
class mambular.base_models.distributional.BaseMambularLSS(family, config, cat_feature_info=None, num_feature_info=None, lr=0.001, lr_patience=10, weight_decay=0.025, lr_factor=0.75, **distribution_params)[source]

A base module for likelihood-based statistical learning (LSS) models built on PyTorch Lightning, integrating the Mamba architecture for tabular data. This module is designed to accommodate various statistical distribution families for different types of regression and classification tasks.

Parameters:
  • family (str) – The name of the statistical distribution family to be used for modeling. Supported families include ‘normal’, ‘poisson’, ‘gamma’, ‘beta’, ‘dirichlet’, ‘studentt’, ‘negativebinom’, ‘inversegamma’, and ‘categorical’.
  • config (MambularConfig) – An instance of MambularConfig containing configuration parameters for the model architecture.
  • cat_feature_info (dict, optional) – A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None.
  • num_feature_info (dict, optional) – A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None.
  • lr (float, optional) – The initial learning rate for the optimizer. Defaults to 1e-03.
  • lr_patience (int, optional) – The number of epochs with no improvement after which the learning rate will be reduced. Defaults to 10.
  • weight_decay (float, optional) – Weight decay (L2 penalty) coefficient. Defaults to 0.025.
  • lr_factor (float, optional) – Factor by which the learning rate will be reduced. Defaults to 0.75.
  • **distribution_params – Additional parameters specific to the chosen statistical distribution family.
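A corresponding sketch for the LSS variant, reusing the hypothetical config and feature-info objects from the classifier example above; only the family argument (plus any optional **distribution_params) changes the modeling behaviour.

    from mambular.base_models.distributional import BaseMambularLSS

    # Sketch only: `config`, `cat_feature_info` and `num_feature_info` are the
    # hypothetical objects constructed in the BaseMambularClassifier example.
    lss_model = BaseMambularLSS(
        family="normal",                # e.g. "poisson", "gamma", "categorical", ...
        config=config,
        cat_feature_info=cat_feature_info,
        num_feature_info=num_feature_info,
    )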
mamba (Mamba) – The core neural network module implementing the Mamba architecture.
norm_f (nn.Module) – Normalization layer applied after the Mamba block.
tabular_head (nn.Linear) – Final linear layer mapping the features to the parameters of the chosen statistical distribution.
loss_fct (callable) – The loss function derived from the chosen statistical distribution.

forward(cat_features, num_features)[source] – Defines the forward pass of the model.
training_step(batch, batch_idx)[source] – Processes a single batch during training.
validation_step(batch, batch_idx)[source] – Processes a single batch during validation.
configure_optimizers()[source] – Sets up the model’s optimizer and learning rate scheduler.
Attributes:
-
-
automatic_optimization

If set to False you are responsible for calling .backward(), .step(), .zero_grad().

-
-
current_epoch

The current epoch in the Trainer, or 0 if not attached.

-
-
device
-
dtype
-
example_input_array

The example input array is a specification of what the module can consume in the forward() method.

-
-
fabric
-
global_rank

The index of the current process across all nodes and devices.

-
-
global_step

Total training batches seen across all epochs.

-
-
hparams

The collection of hyperparameters saved with save_hyperparameters().

-
-
hparams_initial

The collection of hyperparameters saved with save_hyperparameters().

-
-
local_rank

The index of the current process within a single node.

-
-
logger

Reference to the logger object in the Trainer.

-
-
loggers

Reference to the list of loggers in the Trainer.

-
-
on_gpu

Returns True if this model is currently located on a GPU.

-
-
strict_loading

Determines how Lightning loads this model using .load_state_dict(…, strict=model.strict_loading).

-
-
trainer
-
-
-
-

Methods


add_module(name, module)

Add a child module to the current module.

all_gather(data[, group, sync_grads])

Gather tensors or collections of tensors from multiple processes.

apply(fn)

Apply fn recursively to every submodule (as returned by .children()) as well as self.

backward(loss, *args, **kwargs)

Called to perform backward on the loss returned in training_step().

bfloat16()

Casts all floating point parameters and buffers to bfloat16 datatype.

buffers([recurse])

Return an iterator over module buffers.

children()

Return an iterator over immediate children modules.

clip_gradients(optimizer[, ...])

Handles gradient clipping internally.

compile(*args, **kwargs)

Compile this Module's forward using torch.compile().

configure_callbacks()

Configure model-specific callbacks.

configure_gradient_clipping(optimizer[, ...])

Perform gradient clipping for the optimizer parameters.

configure_model()

Hook to create modules in a strategy and precision aware context.

configure_optimizers()

Sets up the model's optimizer and learning rate scheduler based on the configurations provided.

configure_sharded_model()

Deprecated.

cpu()

See torch.nn.Module.cpu().

cuda([device])

Moves all model parameters and buffers to the GPU.

double()

See torch.nn.Module.double().

eval()

Set the module in evaluation mode.

extra_repr()

Set the extra representation of the module.

float()

See torch.nn.Module.float().

forward(cat_features, num_features)

Defines the forward pass of the model, processing both categorical and numerical features, and returning predictions based on the configured statistical distribution.

freeze()

Freeze all params for inference.

get_buffer(target)

Return the buffer given by target if it exists, otherwise throw an error.

get_extra_state()

Return any extra state to include in the module's state_dict.

get_parameter(target)

Return the parameter given by target if it exists, otherwise throw an error.

get_submodule(target)

Return the submodule given by target if it exists, otherwise throw an error.

half()

See torch.nn.Module.half().

ipu([device])

Move all model parameters and buffers to the IPU.

load_from_checkpoint(checkpoint_path[, ...])

Primary way of loading a model from a checkpoint.

load_state_dict(state_dict[, strict, assign])

Copy parameters and buffers from state_dict into this module and its descendants.

log(name, value[, prog_bar, logger, ...])

Log a key, value pair.

log_dict(dictionary[, prog_bar, logger, ...])

Log a dictionary of values at once.

lr_scheduler_step(scheduler, metric)

Override this method to adjust the default way the Trainer calls each scheduler.

lr_schedulers()

Returns the learning rate scheduler(s) that are being used during training.

manual_backward(loss, *args, **kwargs)

Call this directly from your training_step() when doing optimizations manually.

modules()

Return an iterator over all modules in the network.

named_buffers([prefix, recurse, ...])

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

named_children()

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

named_modules([memo, prefix, remove_duplicate])

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

named_parameters([prefix, recurse, ...])

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

on_after_backward()

Called after loss.backward() and before optimizers are stepped.

on_after_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch after it is transferred to the device.

on_before_backward(loss)

Called before loss.backward().

on_before_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch before it is transferred to the device.

on_before_optimizer_step(optimizer)

Called before optimizer.step().

on_before_zero_grad(optimizer)

Called after training_step() and before optimizer.zero_grad().

on_fit_end()

Called at the very end of fit.

on_fit_start()

Called at the very beginning of fit.

on_load_checkpoint(checkpoint)

Called by Lightning to restore your model.

on_predict_batch_end(outputs, batch, batch_idx)

Called in the predict loop after the batch.

on_predict_batch_start(batch, batch_idx[, ...])

Called in the predict loop before anything happens for that batch.

on_predict_end()

Called at the end of predicting.

on_predict_epoch_end()

Called at the end of predicting.

on_predict_epoch_start()

Called at the beginning of predicting.

on_predict_model_eval()

Called when the predict loop starts.

on_predict_start()

Called at the beginning of predicting.

on_save_checkpoint(checkpoint)

Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want to save.

on_test_batch_end(outputs, batch, batch_idx)

Called in the test loop after the batch.

on_test_batch_start(batch, batch_idx[, ...])

Called in the test loop before anything happens for that batch.

on_test_end()

Called at the end of testing.

on_test_epoch_end()

Called in the test loop at the very end of the epoch.

on_test_epoch_start()

Called in the test loop at the very beginning of the epoch.

on_test_model_eval()

Called when the test loop starts.

on_test_model_train()

Called when the test loop ends.

on_test_start()

Called at the beginning of testing.

on_train_batch_end(outputs, batch, batch_idx)

Called in the training loop after the batch.

on_train_batch_start(batch, batch_idx)

Called in the training loop before anything happens for that batch.

on_train_end()

Called at the end of training before logger experiment is closed.

on_train_epoch_end()

Called in the training loop at the very end of the epoch.

on_train_epoch_start()

Called in the training loop at the very beginning of the epoch.

on_train_start()

Called at the beginning of training after sanity check.

on_validation_batch_end(outputs, batch, ...)

Called in the validation loop after the batch.

on_validation_batch_start(batch, batch_idx)

Called in the validation loop before anything happens for that batch.

on_validation_end()

Called at the end of validation.

on_validation_epoch_end()

Called in the validation loop at the very end of the epoch.

on_validation_epoch_start()

Called in the validation loop at the very beginning of the epoch.

on_validation_model_eval()

Called when the validation loop starts.

on_validation_model_train()

Called when the validation loop ends.

on_validation_model_zero_grad()

Called by the training loop to release gradients before entering the validation loop.

on_validation_start()

Called at the beginning of validation.

optimizer_step(epoch, batch_idx, optimizer)

Override this method to adjust the default way the Trainer calls the optimizer.

optimizer_zero_grad(epoch, batch_idx, optimizer)

Override this method to change the default behaviour of optimizer.zero_grad().

optimizers([use_pl_optimizer])

Returns the optimizer(s) that are being used during training.

parameters([recurse])

Return an iterator over module parameters.

predict_dataloader()

An iterable or collection of iterables specifying prediction samples.

predict_step(*args, **kwargs)

Step function called during predict().

prepare_data()

Use this to download and prepare data.

print(*args, **kwargs)

Prints only from process 0.

register_backward_hook(hook)

Register a backward hook on the module.

register_buffer(name, tensor[, persistent])

Add a buffer to the module.

register_forward_hook(hook, *[, prepend, ...])

Register a forward hook on the module.

register_forward_pre_hook(hook, *[, ...])

Register a forward pre-hook on the module.

register_full_backward_hook(hook[, prepend])

Register a backward hook on the module.

register_full_backward_pre_hook(hook[, prepend])

Register a backward pre-hook on the module.

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

register_module(name, module)

Alias for add_module().

register_parameter(name, param)

Add a parameter to the module.

register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

requires_grad_([requires_grad])

Change if autograd should record operations on parameters in this module.

save_hyperparameters(*args[, ignore, frame, ...])

Save arguments to hparams attribute.

set_extra_state(state)

Set extra state contained in the loaded state_dict.

setup(stage)

Called at the beginning of fit (train + validate), validate, test, or predict.

share_memory()

See torch.Tensor.share_memory_().

state_dict(*args[, destination, prefix, ...])

Return a dictionary containing references to the whole state of the module.

teardown(stage)

Called at the end of fit (train + validate), validate, test, or predict.

test_dataloader()

An iterable or collection of iterables specifying test samples.

test_step(*args, **kwargs)

Operates on a single batch of data from the test set.

to(*args, **kwargs)

See torch.nn.Module.to().

to_empty(*, device[, recurse])

Move the parameters and buffers to the specified device without copying storage.

to_onnx(file_path[, input_sample])

Saves the model in ONNX format.

to_torchscript([file_path, method, ...])

By default compiles the whole model to a ScriptModule.

toggle_optimizer(optimizer)

Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup.

train([mode])

Set the module in training mode.

train_dataloader()

An iterable or collection of iterables specifying training samples.

training_step(batch, batch_idx)

Processes a single batch during training, computes the loss using the distribution-specific loss function, and logs training metrics.

transfer_batch_to_device(batch, device, ...)

Override this hook if your DataLoader returns tensors wrapped in a custom data structure.

type(dst_type)

See torch.nn.Module.type().

unfreeze()

Unfreeze all parameters for training.

untoggle_optimizer(optimizer)

Resets the state of required gradients that were toggled with toggle_optimizer().

val_dataloader()

An iterable or collection of iterables specifying validation samples.

validation_step(batch, batch_idx)

Processes a single batch during validation, computes the loss using the distribution-specific loss function, and logs validation metrics.

xpu([device])

Move all model parameters and buffers to the XPU.

zero_grad([set_to_none])

Reset gradients of all model parameters.


__call__

-
-
configure_optimizers()[source]

Sets up the model’s optimizer and learning rate scheduler based on the configurations provided.

Returns: A dictionary containing the optimizer and lr_scheduler configurations.
Return type: dict

forward(cat_features, num_features)[source]

Defines the forward pass of the model, processing both categorical and numerical features, and returning predictions based on the configured statistical distribution.

Parameters:
  • cat_features (Tensor) – Tensor containing the categorical features.
  • num_features (Tensor) – Tensor containing the numerical features.

Returns: The predictions of the model, typically the parameters of the chosen statistical distribution.
Return type: Tensor

training_step(batch, batch_idx)[source]

Processes a single batch during training, computes the loss using the distribution-specific loss function, and logs training metrics.

Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.

Returns: The computed loss for the batch.
Return type: Tensor

validation_step(batch, batch_idx)[source]

Processes a single batch during validation, computes the loss using the distribution-specific loss function, and logs validation metrics.

Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
class mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier(num_classes, config, cat_feature_info=None, num_feature_info=None, lr=0.001, lr_patience=10, weight_decay=0.025, lr_factor=0.75, seq_size=20, raw_embeddings=False)[source]

A specialized classification module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable for complex protein sequence data.

Parameters:
  • config (MambularConfig) – Configuration parameters for the model architecture.
  • cat_feature_info (dict, optional) – Information about categorical features, mapping feature names to the number of unique categories.
  • num_feature_info (dict, optional) – Information about numerical features, mapping feature names to their number of dimensions after embedding.
  • lr (float, optional) – Learning rate for the optimizer. Defaults to 1e-03.
  • lr_patience (int, optional) – Number of epochs with no improvement after which the learning rate will be reduced. Defaults to 10.
  • weight_decay (float, optional) – Weight decay coefficient for regularization in the optimizer. Defaults to 0.025.
  • lr_factor (float, optional) – Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75.
  • seq_size (int, optional) – Size of sequence chunks for processing numerical features. Relevant when raw_embeddings is False.
  • raw_embeddings (bool, optional) – Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False.
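A final sketch for the sequence-oriented variant, again reusing the hypothetical config and feature-info objects from the first example; seq_size is only relevant when raw_embeddings is False, as noted above.

    from mambular.base_models.embedding_classifier import BaseEmbeddingMambularClassifier

    # Sketch only: reuses the hypothetical `config` and feature-info dictionaries.
    seq_model = BaseEmbeddingMambularClassifier(
        num_classes=2,
        config=config,
        cat_feature_info=cat_feature_info,
        num_feature_info=num_feature_info,
        seq_size=20,            # chunk size for numerical features (raw_embeddings=False)
        raw_embeddings=False,
    )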
mamba (Mamba) – The core neural network module implementing the Mamba architecture.
norm_f (nn.Module) – Normalization layer applied after the Mamba block.
tabular_head (nn.Linear) – Final linear layer mapping the features to the target.

forward(cat_features, num_features)[source] – Defines the forward pass of the model.
training_step(batch, batch_idx)[source] – Processes a single batch during training.
validation_step(batch, batch_idx)[source] – Processes a single batch during validation.
configure_optimizers()[source] – Sets up the model’s optimizer and learning rate scheduler.
Attributes:
-
-
automatic_optimization

If set to False you are responsible for calling .backward(), .step(), .zero_grad().

-
-
current_epoch

The current epoch in the Trainer, or 0 if not attached.

-
-
device
-
dtype
-
example_input_array

The example input array is a specification of what the module can consume in the forward() method.

-
-
fabric
-
global_rank

The index of the current process across all nodes and devices.

-
-
global_step

Total training batches seen across all epochs.

-
-
hparams

The collection of hyperparameters saved with save_hyperparameters().

-
-
hparams_initial

The collection of hyperparameters saved with save_hyperparameters().

-
-
local_rank

The index of the current process within a single node.

-
-
logger

Reference to the logger object in the Trainer.

-
-
loggers

Reference to the list of loggers in the Trainer.

-
-
on_gpu

Returns True if this model is currently located on a GPU.

-
-
strict_loading

Determines how Lightning loads this model using .load_state_dict(…, strict=model.strict_loading).

-
-
trainer
-
-
-
-

Methods


add_module(name, module)

Add a child module to the current module.

all_gather(data[, group, sync_grads])

Gather tensors or collections of tensors from multiple processes.

apply(fn)

Apply fn recursively to every submodule (as returned by .children()) as well as self.

backward(loss, *args, **kwargs)

Called to perform backward on the loss returned in training_step().

bfloat16()

Casts all floating point parameters and buffers to bfloat16 datatype.

buffers([recurse])

Return an iterator over module buffers.

children()

Return an iterator over immediate children modules.

clip_gradients(optimizer[, ...])

Handles gradient clipping internally.

compile(*args, **kwargs)

Compile this Module's forward using torch.compile().

configure_callbacks()

Configure model-specific callbacks.

configure_gradient_clipping(optimizer[, ...])

Perform gradient clipping for the optimizer parameters.

configure_model()

Hook to create modules in a strategy and precision aware context.

configure_optimizers()

Sets up the model's optimizer and learning rate scheduler based on the configurations provided.

configure_sharded_model()

Deprecated.

cpu()

See torch.nn.Module.cpu().

cuda([device])

Moves all model parameters and buffers to the GPU.

double()

See torch.nn.Module.double().

eval()

Set the module in evaluation mode.

extra_repr()

Set the extra representation of the module.

float()

See torch.nn.Module.float().

forward(cat_features, num_features)

Defines the forward pass of the model, processing both categorical and numerical features, and returning regression predictions.

freeze()

Freeze all params for inference.

get_buffer(target)

Return the buffer given by target if it exists, otherwise throw an error.

get_extra_state()

Return any extra state to include in the module's state_dict.

get_parameter(target)

Return the parameter given by target if it exists, otherwise throw an error.

get_submodule(target)

Return the submodule given by target if it exists, otherwise throw an error.

half()

See torch.nn.Module.half().

ipu([device])

Move all model parameters and buffers to the IPU.

load_from_checkpoint(checkpoint_path[, ...])

Primary way of loading a model from a checkpoint.

load_state_dict(state_dict[, strict, assign])

Copy parameters and buffers from state_dict into this module and its descendants.

log(name, value[, prog_bar, logger, ...])

Log a key, value pair.

log_dict(dictionary[, prog_bar, logger, ...])

Log a dictionary of values at once.

lr_scheduler_step(scheduler, metric)

Override this method to adjust the default way the Trainer calls each scheduler.

lr_schedulers()

Returns the learning rate scheduler(s) that are being used during training.

manual_backward(loss, *args, **kwargs)

Call this directly from your training_step() when doing optimizations manually.

modules()

Return an iterator over all modules in the network.

named_buffers([prefix, recurse, ...])

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

named_children()

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

named_modules([memo, prefix, remove_duplicate])

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

named_parameters([prefix, recurse, ...])

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

on_after_backward()

Called after loss.backward() and before optimizers are stepped.

on_after_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch after it is transferred to the device.

on_before_backward(loss)

Called before loss.backward().

on_before_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch before it is transferred to the device.

on_before_optimizer_step(optimizer)

Called before optimizer.step().

on_before_zero_grad(optimizer)

Called after training_step() and before optimizer.zero_grad().

on_fit_end()

Called at the very end of fit.

on_fit_start()

Called at the very beginning of fit.

on_load_checkpoint(checkpoint)

Called by Lightning to restore your model.

on_predict_batch_end(outputs, batch, batch_idx)

Called in the predict loop after the batch.

on_predict_batch_start(batch, batch_idx[, ...])

Called in the predict loop before anything happens for that batch.

on_predict_end()

Called at the end of predicting.

on_predict_epoch_end()

Called at the end of predicting.

on_predict_epoch_start()

Called at the beginning of predicting.

on_predict_model_eval()

Called when the predict loop starts.

on_predict_start()

Called at the beginning of predicting.

on_save_checkpoint(checkpoint)

Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want to save.

on_test_batch_end(outputs, batch, batch_idx)

Called in the test loop after the batch.

on_test_batch_start(batch, batch_idx[, ...])

Called in the test loop before anything happens for that batch.

on_test_end()

Called at the end of testing.

on_test_epoch_end()

Called in the test loop at the very end of the epoch.

on_test_epoch_start()

Called in the test loop at the very beginning of the epoch.

on_test_model_eval()

Called when the test loop starts.

on_test_model_train()

Called when the test loop ends.

on_test_start()

Called at the beginning of testing.

on_train_batch_end(outputs, batch, batch_idx)

Called in the training loop after the batch.

on_train_batch_start(batch, batch_idx)

Called in the training loop before anything happens for that batch.

on_train_end()

Called at the end of training before logger experiment is closed.

on_train_epoch_end()

Called in the training loop at the very end of the epoch.

on_train_epoch_start()

Called in the training loop at the very beginning of the epoch.

on_train_start()

Called at the beginning of training after sanity check.

on_validation_batch_end(outputs, batch, ...)

Called in the validation loop after the batch.

on_validation_batch_start(batch, batch_idx)

Called in the validation loop before anything happens for that batch.

on_validation_end()

Called at the end of validation.

on_validation_epoch_end()

Called in the validation loop at the very end of the epoch.

on_validation_epoch_start()

Called in the validation loop at the very beginning of the epoch.

on_validation_model_eval()

Called when the validation loop starts.

on_validation_model_train()

Called when the validation loop ends.

on_validation_model_zero_grad()

Called by the training loop to release gradients before entering the validation loop.

on_validation_start()

Called at the beginning of validation.

optimizer_step(epoch, batch_idx, optimizer)

Override this method to adjust the default way the Trainer calls the optimizer.

optimizer_zero_grad(epoch, batch_idx, optimizer)

Override this method to change the default behaviour of optimizer.zero_grad().

optimizers([use_pl_optimizer])

Returns the optimizer(s) that are being used during training.

parameters([recurse])

Return an iterator over module parameters.

predict_dataloader()

An iterable or collection of iterables specifying prediction samples.

predict_step(*args, **kwargs)

Step function called during predict().

prepare_data()

Use this to download and prepare data.

print(*args, **kwargs)

Prints only from process 0.

register_backward_hook(hook)

Register a backward hook on the module.

register_buffer(name, tensor[, persistent])

Add a buffer to the module.

register_forward_hook(hook, *[, prepend, ...])

Register a forward hook on the module.

register_forward_pre_hook(hook, *[, ...])

Register a forward pre-hook on the module.

register_full_backward_hook(hook[, prepend])

Register a backward hook on the module.

register_full_backward_pre_hook(hook[, prepend])

Register a backward pre-hook on the module.

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

register_module(name, module)

Alias for add_module().

register_parameter(name, param)

Add a parameter to the module.

register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

requires_grad_([requires_grad])

Change if autograd should record operations on parameters in this module.

save_hyperparameters(*args[, ignore, frame, ...])

Save arguments to hparams attribute.

set_extra_state(state)

Set extra state contained in the loaded state_dict.

setup(stage)

Called at the beginning of fit (train + validate), validate, test, or predict.

share_memory()

See torch.Tensor.share_memory_().

state_dict(*args[, destination, prefix, ...])

Return a dictionary containing references to the whole state of the module.

teardown(stage)

Called at the end of fit (train + validate), validate, test, or predict.

test_dataloader()

An iterable or collection of iterables specifying test samples.

test_step(*args, **kwargs)

Operates on a single batch of data from the test set.

to(*args, **kwargs)

See torch.nn.Module.to().

to_empty(*, device[, recurse])

Move the parameters and buffers to the specified device without copying storage.

to_onnx(file_path[, input_sample])

Saves the model in ONNX format.

to_torchscript([file_path, method, ...])

By default compiles the whole model to a ScriptModule.

toggle_optimizer(optimizer)

Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup.

train([mode])

Set the module in training mode.

train_dataloader()

An iterable or collection of iterables specifying training samples.

training_step(batch, batch_idx)

Processes a single batch during training, computes the loss, and logs training metrics.

transfer_batch_to_device(batch, device, ...)

Override this hook if your DataLoader returns tensors wrapped in a custom data structure.

type(dst_type)

See torch.nn.Module.type().

unfreeze()

Unfreeze all parameters for training.

untoggle_optimizer(optimizer)

Resets the state of required gradients that were toggled with toggle_optimizer().

val_dataloader()

An iterable or collection of iterables specifying validation samples.

validation_step(batch, batch_idx)

Processes a single batch during validation, computes the loss, and logs validation metrics.

xpu([device])

Move all model parameters and buffers to the XPU.

zero_grad([set_to_none])

Reset gradients of all model parameters.

__call__
configure_optimizers()[source]
Sets up the model’s optimizer and learning rate scheduler based on the configurations provided.
Returns: A dictionary containing the optimizer and lr_scheduler configurations.
Return type: dict

forward(cat_features, num_features)[source]
Defines the forward pass of the model, processing both categorical and numerical features, and returning regression predictions.
Parameters:
  • cat_features (Tensor) – Tensor containing the categorical features.
  • num_features (Tensor) – Tensor containing the numerical features or raw sequence data, depending on raw_embeddings.
Returns: The output predictions of the model for regression tasks.
Return type: Tensor

training_step(batch, batch_idx)[source]
Processes a single batch during training, computes the loss, and logs training metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
Returns: The computed loss for the batch.
Return type: Tensor

validation_step(batch, batch_idx)[source]
Processes a single batch during validation, computes the loss, and logs validation metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
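For orientation, the dictionary returned by configure_optimizers() follows the usual PyTorch Lightning shape: an optimizer plus an lr_scheduler configuration. The sketch below is illustrative only; the hyperparameter values mirror the documented defaults, but the choice of Adam, ReduceLROnPlateau, and the monitored metric name are assumptions rather than the library's confirmed implementation.

import torch
import torch.nn as nn

# Stand-in parameters so the snippet runs on its own; a real model would pass
# self.parameters() instead.
params = nn.Linear(4, 1).parameters()
optimizer = torch.optim.Adam(params, lr=1e-3, weight_decay=0.025)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.75, patience=10)
result = {
    "optimizer": optimizer,
    "lr_scheduler": {"scheduler": scheduler, "monitor": "val_loss"},  # monitored key is assumed
}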
class mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor(config, cat_feature_info=None, num_feature_info=None, lr=0.001, lr_patience=10, weight_decay=0.025, lr_factor=0.75, seq_size=20, raw_embeddings=False)[source]

A specialized regression module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable for complex protein sequence data.

Parameters:
  • config (MambularConfig) – Configuration parameters for the model architecture.
  • cat_feature_info (dict, optional) – Information about categorical features, mapping feature names to the number of unique categories. Defaults to None.
  • num_feature_info (dict, optional) – Information about numerical features, mapping feature names to their number of dimensions after embedding. Defaults to None.
  • lr (float, optional) – Learning rate for the optimizer. Defaults to 1e-03.
  • lr_patience (int, optional) – Number of epochs with no improvement after which the learning rate will be reduced. Defaults to 10.
  • weight_decay (float, optional) – Weight decay coefficient for regularization in the optimizer. Defaults to 0.025.
  • lr_factor (float, optional) – Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75.
  • seq_size (int, optional) – Size of sequence chunks for processing numerical features. Relevant when raw_embeddings is False.
  • raw_embeddings (bool, optional) – Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False.
-
mamba
The core neural network module implementing the Mamba architecture.
Type: Mamba

norm_f
Normalization layer applied after the Mamba block.
Type: nn.Module

tabular_head
Final linear layer mapping the features to the regression target.
Type: nn.Linear

loss_fct
The loss function for regression tasks.
Type: nn.MSELoss

forward(cat_features, num_features)[source]
Defines the forward pass of the model.

training_step(batch, batch_idx)[source]
Processes a single batch during training.

validation_step(batch, batch_idx)[source]
Processes a single batch during validation.

configure_optimizers()[source]
Sets up the model’s optimizer and learning rate scheduler.
Attributes:

automatic_optimization – If set to False you are responsible for calling .backward(), .step(), .zero_grad().
current_epoch – The current epoch in the Trainer, or 0 if not attached.
device
dtype
example_input_array – The example input array is a specification of what the module can consume in the forward() method.
fabric
global_rank – The index of the current process across all nodes and devices.
global_step – Total training batches seen across all epochs.
hparams – The collection of hyperparameters saved with save_hyperparameters().
hparams_initial – The collection of hyperparameters saved with save_hyperparameters().
local_rank – The index of the current process within a single node.
logger – Reference to the logger object in the Trainer.
loggers – Reference to the list of loggers in the Trainer.
on_gpu – Returns True if this model is currently located on a GPU.
strict_loading – Determines how Lightning loads this model using .load_state_dict(…, strict=model.strict_loading).
trainer

Methods


add_module(name, module)

Add a child module to the current module.

all_gather(data[, group, sync_grads])

Gather tensors or collections of tensors from multiple processes.

apply(fn)

Apply fn recursively to every submodule (as returned by .children()) as well as self.

backward(loss, *args, **kwargs)

Called to perform backward on the loss returned in training_step().

bfloat16()

Casts all floating point parameters and buffers to bfloat16 datatype.

buffers([recurse])

Return an iterator over module buffers.

children()

Return an iterator over immediate children modules.

clip_gradients(optimizer[, ...])

Handles gradient clipping internally.

compile(*args, **kwargs)

Compile this Module's forward using torch.compile().

configure_callbacks()

Configure model-specific callbacks.

configure_gradient_clipping(optimizer[, ...])

Perform gradient clipping for the optimizer parameters.

configure_model()

Hook to create modules in a strategy and precision aware context.

configure_optimizers()

Sets up the model's optimizer and learning rate scheduler based on the configurations provided.

configure_sharded_model()

Deprecated.

cpu()

See torch.nn.Module.cpu().

cuda([device])

Moves all model parameters and buffers to the GPU.

double()

See torch.nn.Module.double().

eval()

Set the module in evaluation mode.

extra_repr()

Set the extra representation of the module.

float()

See torch.nn.Module.float().

forward(cat_features, num_features)

Defines the forward pass of the model, processing both categorical and numerical features, and returning regression predictions.

freeze()

Freeze all params for inference.

get_buffer(target)

Return the buffer given by target if it exists, otherwise throw an error.

get_extra_state()

Return any extra state to include in the module's state_dict.

get_parameter(target)

Return the parameter given by target if it exists, otherwise throw an error.

get_submodule(target)

Return the submodule given by target if it exists, otherwise throw an error.

half()

See torch.nn.Module.half().

ipu([device])

Move all model parameters and buffers to the IPU.

load_from_checkpoint(checkpoint_path[, ...])

Primary way of loading a model from a checkpoint.

load_state_dict(state_dict[, strict, assign])

Copy parameters and buffers from state_dict into this module and its descendants.

log(name, value[, prog_bar, logger, ...])

Log a key, value pair.

log_dict(dictionary[, prog_bar, logger, ...])

Log a dictionary of values at once.

lr_scheduler_step(scheduler, metric)

Override this method to adjust the default way the Trainer calls each scheduler.

lr_schedulers()

Returns the learning rate scheduler(s) that are being used during training.

manual_backward(loss, *args, **kwargs)

Call this directly from your training_step() when doing optimizations manually.

modules()

Return an iterator over all modules in the network.

named_buffers([prefix, recurse, ...])

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

named_children()

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

named_modules([memo, prefix, remove_duplicate])

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

named_parameters([prefix, recurse, ...])

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

on_after_backward()

Called after loss.backward() and before optimizers are stepped.

on_after_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch after it is transferred to the device.

on_before_backward(loss)

Called before loss.backward().

on_before_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch before it is transferred to the device.

on_before_optimizer_step(optimizer)

Called before optimizer.step().

on_before_zero_grad(optimizer)

Called after training_step() and before optimizer.zero_grad().

on_fit_end()

Called at the very end of fit.

on_fit_start()

Called at the very beginning of fit.

on_load_checkpoint(checkpoint)

Called by Lightning to restore your model.

on_predict_batch_end(outputs, batch, batch_idx)

Called in the predict loop after the batch.

on_predict_batch_start(batch, batch_idx[, ...])

Called in the predict loop before anything happens for that batch.

on_predict_end()

Called at the end of predicting.

on_predict_epoch_end()

Called at the end of predicting.

on_predict_epoch_start()

Called at the beginning of predicting.

on_predict_model_eval()

Called when the predict loop starts.

on_predict_start()

Called at the beginning of predicting.

on_save_checkpoint(checkpoint)

Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want to save.

on_test_batch_end(outputs, batch, batch_idx)

Called in the test loop after the batch.

on_test_batch_start(batch, batch_idx[, ...])

Called in the test loop before anything happens for that batch.

on_test_end()

Called at the end of testing.

on_test_epoch_end()

Called in the test loop at the very end of the epoch.

on_test_epoch_start()

Called in the test loop at the very beginning of the epoch.

on_test_model_eval()

Called when the test loop starts.

on_test_model_train()

Called when the test loop ends.

on_test_start()

Called at the beginning of testing.

on_train_batch_end(outputs, batch, batch_idx)

Called in the training loop after the batch.

on_train_batch_start(batch, batch_idx)

Called in the training loop before anything happens for that batch.

on_train_end()

Called at the end of training before logger experiment is closed.

on_train_epoch_end()

Called in the training loop at the very end of the epoch.

on_train_epoch_start()

Called in the training loop at the very beginning of the epoch.

on_train_start()

Called at the beginning of training after sanity check.

on_validation_batch_end(outputs, batch, ...)

Called in the validation loop after the batch.

on_validation_batch_start(batch, batch_idx)

Called in the validation loop before anything happens for that batch.

on_validation_end()

Called at the end of validation.

on_validation_epoch_end()

Called in the validation loop at the very end of the epoch.

on_validation_epoch_start()

Called in the validation loop at the very beginning of the epoch.

on_validation_model_eval()

Called when the validation loop starts.

on_validation_model_train()

Called when the validation loop ends.

on_validation_model_zero_grad()

Called by the training loop to release gradients before entering the validation loop.

on_validation_start()

Called at the beginning of validation.

optimizer_step(epoch, batch_idx, optimizer)

Override this method to adjust the default way the Trainer calls the optimizer.

optimizer_zero_grad(epoch, batch_idx, optimizer)

Override this method to change the default behaviour of optimizer.zero_grad().

optimizers([use_pl_optimizer])

Returns the optimizer(s) that are being used during training.

parameters([recurse])

Return an iterator over module parameters.

predict_dataloader()

An iterable or collection of iterables specifying prediction samples.

predict_step(*args, **kwargs)

Step function called during predict().

prepare_data()

Use this to download and prepare data.

print(*args, **kwargs)

Prints only from process 0.

register_backward_hook(hook)

Register a backward hook on the module.

register_buffer(name, tensor[, persistent])

Add a buffer to the module.

register_forward_hook(hook, *[, prepend, ...])

Register a forward hook on the module.

register_forward_pre_hook(hook, *[, ...])

Register a forward pre-hook on the module.

register_full_backward_hook(hook[, prepend])

Register a backward hook on the module.

register_full_backward_pre_hook(hook[, prepend])

Register a backward pre-hook on the module.

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

register_module(name, module)

Alias for add_module().

register_parameter(name, param)

Add a parameter to the module.

register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

requires_grad_([requires_grad])

Change if autograd should record operations on parameters in this module.

save_hyperparameters(*args[, ignore, frame, ...])

Save arguments to hparams attribute.

set_extra_state(state)

Set extra state contained in the loaded state_dict.

setup(stage)

Called at the beginning of fit (train + validate), validate, test, or predict.

share_memory()

See torch.Tensor.share_memory_().

state_dict(*args[, destination, prefix, ...])

Return a dictionary containing references to the whole state of the module.

teardown(stage)

Called at the end of fit (train + validate), validate, test, or predict.

test_dataloader()

An iterable or collection of iterables specifying test samples.

test_step(*args, **kwargs)

Operates on a single batch of data from the test set.

to(*args, **kwargs)

See torch.nn.Module.to().

to_empty(*, device[, recurse])

Move the parameters and buffers to the specified device without copying storage.

to_onnx(file_path[, input_sample])

Saves the model in ONNX format.

to_torchscript([file_path, method, ...])

By default compiles the whole model to a ScriptModule.

toggle_optimizer(optimizer)

Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup.

train([mode])

Set the module in training mode.

train_dataloader()

An iterable or collection of iterables specifying training samples.

training_step(batch, batch_idx)

Processes a single batch during training, computes the loss, and logs training metrics.

transfer_batch_to_device(batch, device, ...)

Override this hook if your DataLoader returns tensors wrapped in a custom data structure.

type(dst_type)

See torch.nn.Module.type().

unfreeze()

Unfreeze all parameters for training.

untoggle_optimizer(optimizer)

Resets the state of required gradients that were toggled with toggle_optimizer().

val_dataloader()

An iterable or collection of iterables specifying validation samples.

validation_step(batch, batch_idx)

Processes a single batch during validation, computes the loss, and logs validation metrics.

xpu([device])

Move all model parameters and buffers to the XPU.

zero_grad([set_to_none])

Reset gradients of all model parameters.

__call__
configure_optimizers()[source]
Sets up the model’s optimizer and learning rate scheduler based on the configurations provided.
Returns: A dictionary containing the optimizer and lr_scheduler configurations.
Return type: dict

forward(cat_features, num_features)[source]
Defines the forward pass of the model, processing both categorical and numerical features, and returning regression predictions.
Parameters:
  • cat_features (Tensor) – Tensor containing the categorical features.
  • num_features (Tensor) – Tensor containing the numerical features or raw sequence data, depending on raw_embeddings.
Returns: The output predictions of the model for regression tasks.
Return type: Tensor

training_step(batch, batch_idx)[source]
Processes a single batch during training, computes the loss, and logs training metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
Returns: The computed loss for the batch.
Return type: Tensor

validation_step(batch, batch_idx)[source]
Processes a single batch during validation, computes the loss, and logs validation metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
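To make the constructor signature above concrete, here is a minimal instantiation sketch. The MambularConfig import path, its no-argument constructor, and the feature-info contents are assumptions for illustration; adjust them to the actual package layout.

# Minimal sketch, assuming MambularConfig is importable as below.
from mambular.utils.config import MambularConfig
from mambular.base_models.embedding_regressor import BaseEmbeddingMambularRegressor

config = MambularConfig()  # assumed default-constructible architecture settings
model = BaseEmbeddingMambularRegressor(
    config,
    cat_feature_info={"residue_type": 20},   # hypothetical categorical feature
    num_feature_info={"hydrophobicity": 1},  # hypothetical numerical feature
    lr=1e-3,
    seq_size=20,
    raw_embeddings=False,
)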
class mambular.base_models.regressor.BaseMambularRegressor(config, cat_feature_info=None, num_feature_info=None, lr=0.001, lr_patience=10, weight_decay=0.025, lr_factor=0.75)[source]

A base regression module for tabular data built on PyTorch Lightning. It incorporates embeddings for categorical and numerical features with a configurable architecture provided by MambularConfig. This module is designed for regression tasks.

Parameters:
  • config (MambularConfig) – An instance of MambularConfig containing configuration parameters for the model architecture.
  • cat_feature_info (dict, optional) – A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None.
  • num_feature_info (dict, optional) – A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None.
  • lr (float, optional) – The initial learning rate for the optimizer. Defaults to 1e-03.
  • lr_patience (int, optional) – The number of epochs with no improvement after which the learning rate will be reduced. Defaults to 10.
  • weight_decay (float, optional) – Weight decay (L2 penalty) coefficient. Defaults to 0.025.
  • lr_factor (float, optional) – Factor by which the learning rate will be reduced. Defaults to 0.75.
-
mamba
The core neural network module implementing the Mamba architecture.
Type: Mamba

norm_f
Normalization layer applied after the Mamba block.
Type: nn.Module

tabular_head
Final linear layer mapping the features to a single output for regression tasks.
Type: nn.Linear

train_mse
Metric computation module for training Mean Squared Error.
Type: torchmetrics.MeanSquaredError

val_mse
Metric computation module for validation Mean Squared Error.
Type: torchmetrics.MeanSquaredError

loss_fct
The loss function for regression tasks.
Type: torch.nn.MSELoss

forward(cat_features, num_features)[source]
Defines the forward pass of the model.

training_step(batch, batch_idx)[source]
Processes a single batch during training.

validation_step(batch, batch_idx)[source]
Processes a single batch during validation.

configure_optimizers()[source]
Sets up the model’s optimizer and learning rate scheduler.
Attributes:

automatic_optimization – If set to False you are responsible for calling .backward(), .step(), .zero_grad().
current_epoch – The current epoch in the Trainer, or 0 if not attached.
device
dtype
example_input_array – The example input array is a specification of what the module can consume in the forward() method.
fabric
global_rank – The index of the current process across all nodes and devices.
global_step – Total training batches seen across all epochs.
hparams – The collection of hyperparameters saved with save_hyperparameters().
hparams_initial – The collection of hyperparameters saved with save_hyperparameters().
local_rank – The index of the current process within a single node.
logger – Reference to the logger object in the Trainer.
loggers – Reference to the list of loggers in the Trainer.
on_gpu – Returns True if this model is currently located on a GPU.
strict_loading – Determines how Lightning loads this model using .load_state_dict(…, strict=model.strict_loading).
trainer

Methods


add_module(name, module)

Add a child module to the current module.

all_gather(data[, group, sync_grads])

Gather tensors or collections of tensors from multiple processes.

apply(fn)

Apply fn recursively to every submodule (as returned by .children()) as well as self.

backward(loss, *args, **kwargs)

Called to perform backward on the loss returned in training_step().

bfloat16()

Casts all floating point parameters and buffers to bfloat16 datatype.

buffers([recurse])

Return an iterator over module buffers.

children()

Return an iterator over immediate children modules.

clip_gradients(optimizer[, ...])

Handles gradient clipping internally.

compile(*args, **kwargs)

Compile this Module's forward using torch.compile().

configure_callbacks()

Configure model-specific callbacks.

configure_gradient_clipping(optimizer[, ...])

Perform gradient clipping for the optimizer parameters.

configure_model()

Hook to create modules in a strategy and precision aware context.

configure_optimizers()

Sets up the model's optimizer and learning rate scheduler based on the configurations provided.

configure_sharded_model()

Deprecated.

cpu()

See torch.nn.Module.cpu().

cuda([device])

Moves all model parameters and buffers to the GPU.

double()

See torch.nn.Module.double().

eval()

Set the module in evaluation mode.

extra_repr()

Set the extra representation of the module.

float()

See torch.nn.Module.float().

forward(cat_features, num_features)

Defines the forward pass of the regressor.

freeze()

Freeze all params for inference.

get_buffer(target)

Return the buffer given by target if it exists, otherwise throw an error.

get_extra_state()

Return any extra state to include in the module's state_dict.

get_parameter(target)

Return the parameter given by target if it exists, otherwise throw an error.

get_submodule(target)

Return the submodule given by target if it exists, otherwise throw an error.

half()

See torch.nn.Module.half().

ipu([device])

Move all model parameters and buffers to the IPU.

load_from_checkpoint(checkpoint_path[, ...])

Primary way of loading a model from a checkpoint.

load_state_dict(state_dict[, strict, assign])

Copy parameters and buffers from state_dict into this module and its descendants.

log(name, value[, prog_bar, logger, ...])

Log a key, value pair.

log_dict(dictionary[, prog_bar, logger, ...])

Log a dictionary of values at once.

lr_scheduler_step(scheduler, metric)

Override this method to adjust the default way the Trainer calls each scheduler.

lr_schedulers()

Returns the learning rate scheduler(s) that are being used during training.

manual_backward(loss, *args, **kwargs)

Call this directly from your training_step() when doing optimizations manually.

modules()

Return an iterator over all modules in the network.

named_buffers([prefix, recurse, ...])

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

named_children()

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

named_modules([memo, prefix, remove_duplicate])

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

named_parameters([prefix, recurse, ...])

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

on_after_backward()

Called after loss.backward() and before optimizers are stepped.

on_after_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch after it is transferred to the device.

on_before_backward(loss)

Called before loss.backward().

on_before_batch_transfer(batch, dataloader_idx)

Override to alter or apply batch augmentations to your batch before it is transferred to the device.

on_before_optimizer_step(optimizer)

Called before optimizer.step().

on_before_zero_grad(optimizer)

Called after training_step() and before optimizer.zero_grad().

on_fit_end()

Called at the very end of fit.

on_fit_start()

Called at the very beginning of fit.

on_load_checkpoint(checkpoint)

Called by Lightning to restore your model.

on_predict_batch_end(outputs, batch, batch_idx)

Called in the predict loop after the batch.

on_predict_batch_start(batch, batch_idx[, ...])

Called in the predict loop before anything happens for that batch.

on_predict_end()

Called at the end of predicting.

on_predict_epoch_end()

Called at the end of predicting.

on_predict_epoch_start()

Called at the beginning of predicting.

on_predict_model_eval()

Called when the predict loop starts.

on_predict_start()

Called at the beginning of predicting.

on_save_checkpoint(checkpoint)

Called by Lightning when saving a checkpoint to give you a chance to store anything else you might want to save.

on_test_batch_end(outputs, batch, batch_idx)

Called in the test loop after the batch.

on_test_batch_start(batch, batch_idx[, ...])

Called in the test loop before anything happens for that batch.

on_test_end()

Called at the end of testing.

on_test_epoch_end()

Called in the test loop at the very end of the epoch.

on_test_epoch_start()

Called in the test loop at the very beginning of the epoch.

on_test_model_eval()

Called when the test loop starts.

on_test_model_train()

Called when the test loop ends.

on_test_start()

Called at the beginning of testing.

on_train_batch_end(outputs, batch, batch_idx)

Called in the training loop after the batch.

on_train_batch_start(batch, batch_idx)

Called in the training loop before anything happens for that batch.

on_train_end()

Called at the end of training before logger experiment is closed.

on_train_epoch_end()

Called in the training loop at the very end of the epoch.

on_train_epoch_start()

Called in the training loop at the very beginning of the epoch.

on_train_start()

Called at the beginning of training after sanity check.

on_validation_batch_end(outputs, batch, ...)

Called in the validation loop after the batch.

on_validation_batch_start(batch, batch_idx)

Called in the validation loop before anything happens for that batch.

on_validation_end()

Called at the end of validation.

on_validation_epoch_end()

Called in the validation loop at the very end of the epoch.

on_validation_epoch_start()

Called in the validation loop at the very beginning of the epoch.

on_validation_model_eval()

Called when the validation loop starts.

on_validation_model_train()

Called when the validation loop ends.

on_validation_model_zero_grad()

Called by the training loop to release gradients before entering the validation loop.

on_validation_start()

Called at the beginning of validation.

optimizer_step(epoch, batch_idx, optimizer)

Override this method to adjust the default way the Trainer calls the optimizer.

optimizer_zero_grad(epoch, batch_idx, optimizer)

Override this method to change the default behaviour of optimizer.zero_grad().

optimizers([use_pl_optimizer])

Returns the optimizer(s) that are being used during training.

parameters([recurse])

Return an iterator over module parameters.

predict_dataloader()

An iterable or collection of iterables specifying prediction samples.

predict_step(*args, **kwargs)

Step function called during predict().

prepare_data()

Use this to download and prepare data.

print(*args, **kwargs)

Prints only from process 0.

register_backward_hook(hook)

Register a backward hook on the module.

register_buffer(name, tensor[, persistent])

Add a buffer to the module.

register_forward_hook(hook, *[, prepend, ...])

Register a forward hook on the module.

register_forward_pre_hook(hook, *[, ...])

Register a forward pre-hook on the module.

register_full_backward_hook(hook[, prepend])

Register a backward hook on the module.

register_full_backward_pre_hook(hook[, prepend])

Register a backward pre-hook on the module.

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

register_module(name, module)

Alias for add_module().

register_parameter(name, param)

Add a parameter to the module.

register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

requires_grad_([requires_grad])

Change if autograd should record operations on parameters in this module.

save_hyperparameters(*args[, ignore, frame, ...])

Save arguments to hparams attribute.

set_extra_state(state)

Set extra state contained in the loaded state_dict.

setup(stage)

Called at the beginning of fit (train + validate), validate, test, or predict.

share_memory()

See torch.Tensor.share_memory_().

state_dict(*args[, destination, prefix, ...])

Return a dictionary containing references to the whole state of the module.

teardown(stage)

Called at the end of fit (train + validate), validate, test, or predict.

test_dataloader()

An iterable or collection of iterables specifying test samples.

test_step(*args, **kwargs)

Operates on a single batch of data from the test set.

to(*args, **kwargs)

See torch.nn.Module.to().

to_empty(*, device[, recurse])

Move the parameters and buffers to the specified device without copying storage.

to_onnx(file_path[, input_sample])

Saves the model in ONNX format.

to_torchscript([file_path, method, ...])

By default compiles the whole model to a ScriptModule.

toggle_optimizer(optimizer)

Makes sure only the gradients of the current optimizer's parameters are calculated in the training step to prevent dangling gradients in multiple-optimizer setup.

train([mode])

Set the module in training mode.

train_dataloader()

An iterable or collection of iterables specifying training samples.

training_step(batch, batch_idx)

Processes a single batch during training, computes the loss, and logs training metrics.

transfer_batch_to_device(batch, device, ...)

Override this hook if your DataLoader returns tensors wrapped in a custom data structure.

type(dst_type)

See torch.nn.Module.type().

unfreeze()

Unfreeze all parameters for training.

untoggle_optimizer(optimizer)

Resets the state of required gradients that were toggled with toggle_optimizer().

val_dataloader()

An iterable or collection of iterables specifying validation samples.

validation_step(batch, batch_idx)

Processes a single batch during validation, computes the loss, and logs validation metrics.

xpu([device])

Move all model parameters and buffers to the XPU.

zero_grad([set_to_none])

Reset gradients of all model parameters.

__call__
configure_optimizers()[source]
Sets up the model’s optimizer and learning rate scheduler based on the configurations provided.
Returns: A dictionary containing the optimizer and lr_scheduler configurations.
Return type: dict

forward(cat_features, num_features)[source]
Defines the forward pass of the regressor.
Parameters:
  • cat_features (Tensor) – Tensor containing the categorical features.
  • num_features (Tensor) – Tensor containing the numerical features.
Returns: The output predictions of the model for regression tasks.
Return type: Tensor

training_step(batch, batch_idx)[source]
Processes a single batch during training, computes the loss, and logs training metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
Returns: The computed loss for the batch.
Return type: Tensor

validation_step(batch, batch_idx)[source]
Processes a single batch during validation, computes the loss, and logs validation metrics.
Parameters:
  • batch (tuple) – A batch of data from the DataLoader, containing numerical features, categorical features, and labels.
  • batch_idx (int) – The index of the batch within the epoch.
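Because the base models are standard LightningModules (see the inherited attributes and hooks above), they are trained with a PyTorch Lightning Trainer. A minimal sketch, assuming model is a constructed BaseMambularRegressor and train_loader / val_loader are placeholder DataLoaders that yield batches in the (numerical features, categorical features, labels) layout the steps above describe.

import pytorch_lightning as pl

# Illustrative only: `model`, `train_loader`, and `val_loader` are placeholders.
trainer = pl.Trainer(max_epochs=50)
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)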
\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/api/base_models/index.html deleted file mode 100644 index 4d66c07..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/api/base_models/index.html +++ /dev/null @@ -1,158 +0,0 @@

BaseModels


This module provides base classes for the Mambular models.


Functionality – Description
BaseMambularClassifier – Multi-class and binary classification tasks.
BaseMambularLSS – Various statistical distribution families for different types of regression and classification tasks.
BaseEmbeddingMambularClassifier – Specialized classification module for complex protein sequence data.
BaseEmbeddingMambularRegressor – Specialized regression module for complex protein sequence data.
BaseMambularRegressor – Regression tasks.
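For reference, the base classes live under mambular.base_models. The two import paths below appear in the API reference above; the remaining classes follow the same pattern, but their exact module names are assumptions here.

from mambular.base_models.regressor import BaseMambularRegressor
from mambular.base_models.embedding_regressor import BaseEmbeddingMambularRegressor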

\ No newline at end of file
diff --git a/docs/$READTHEDOCS_OUTPUT/html/api/models/Models.html deleted file mode 100644 index 2cf7fa0..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/api/models/Models.html +++ /dev/null @@ -1,588 +0,0 @@

Models

class mambular.models.sklearn_classifier.MambularClassifier(**kwargs)[source]

A classifier that mimics scikit-learn’s API using PyTorch Lightning and a custom architecture.

This classifier is designed to work with tabular data and provides a flexible interface for specifying model configurations and preprocessing steps. It integrates smoothly with scikit-learn’s utilities, such as cross-validation and grid search.

Parameters:
**kwargs (Various) – Accepts any number of keyword arguments that are passed to the MambularConfig and Preprocessor classes. Known configuration arguments for the model are extracted based on a predefined list, and the rest are passed to the Preprocessor.

config
Configuration object that holds model-specific settings.
Type: MambularConfig

preprocessor
Preprocessor object for handling feature preprocessing like normalization and encoding.
Type: Preprocessor

model
The underlying PyTorch Lightning model, instantiated upon calling the fit method.
Type: BaseMambularClassifier or None

Methods


evaluate(X, y_true[, metrics])

Evaluate the model on the given data using specified metrics.

fit(X, y[, val_size, X_val, y_val, ...])

Fit the model to the given training data, optionally using a separate validation set.

get_metadata_routing()

Get metadata routing of this object.

get_params([deep])

Get parameters for this estimator.

predict(X)

Predict the class labels for the given input samples.

predict_proba(X)

Predict class probabilities for the given input samples.

preprocess_data(X_train, y_train, X_val, ...)

Preprocess the training and validation data and create corresponding DataLoaders.

preprocess_test_data(X)

Preprocesses the test data and creates tensors for categorical and numerical features.

set_fit_request(*[, X_val, batch_size, ...])

Request metadata passed to the fit method.

set_params(**parameters)

Set the parameters of this estimator.

split_data(X, y, val_size, random_state)

Split the dataset into training and validation sets.
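A quick sketch of the estimator-style workflow listed above. X and y are placeholder features and labels, and the keyword arguments shown are illustrative; only the methods documented in the table are used.

from mambular.models.sklearn_classifier import MambularClassifier

# X: tabular features (e.g. a pandas DataFrame), y: class labels; placeholders here.
clf = MambularClassifier()        # keyword arguments are split between MambularConfig and the Preprocessor
clf.fit(X, y, val_size=0.2)       # optional validation split, per the fit signature above
labels = clf.predict(X)
probabilities = clf.predict_proba(X)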

class mambular.models.sklearn_distributional.MambularLSS(**kwargs)[source]

MambularLSS is a machine learning estimator that is designed for structured data, incorporating both preprocessing and a deep learning model. The estimator integrates configurable components for data preprocessing and the neural network model, facilitating end-to-end training and prediction workflows.

The initialization of this class separates configuration arguments for the model and the preprocessor, allowing for flexible adjustment of parameters.

config
Configuration object containing model-specific parameters.
Type: MambularConfig

preprocessor
Preprocessor object for data preprocessing steps.
Type: Preprocessor

model
The neural network model, initialized based on ‘config’.
Type: torch.nn.Module

Parameters:
**kwargs – Arbitrary keyword arguments, divided into configuration for the model and preprocessing. Recognized keys include model parameters such as ‘d_model’, ‘n_layers’, etc.; any additional keys are assumed to be preprocessor arguments.

Methods


evaluate(X, y_true[, metrics, ...])

Evaluate the model on the given data using specified metrics tailored to the distribution type.

fit(X, y, family[, val_size, X_val, y_val, ...])

Fits the model to the provided data, using the specified loss distribution family for the prediction task.

get_default_metrics(distribution_family)

Provides default metrics based on the distribution family.

get_metadata_routing()

Get metadata routing of this object.

get_params([deep])

Get parameters for this estimator, optionally including parameters from nested components like the preprocessor.

predict(X)

Predicts target values for the given input samples using the fitted model.

preprocess_data(X_train, y_train, X_val, ...)

Preprocess the training and validation data, fit the preprocessor on the training data, and transform both training and validation data.

preprocess_test_data(X)

Preprocess test data using the fitted preprocessor.

set_fit_request(*[, X_val, batch_size, ...])

Request metadata passed to the fit method.

set_params(**parameters)

Set the parameters of this estimator, allowing for modifications to both the configuration and preprocessor parameters.

split_data(X, y, val_size, random_state)

Split the dataset into training and validation sets.
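A sketch of distributional regression with MambularLSS. X and y are placeholders, the d_model value is arbitrary, and "normal" is an assumed example of a distribution family rather than a confirmed option.

from mambular.models.sklearn_distributional import MambularLSS

# X: tabular features, y: continuous target; placeholders.
lss = MambularLSS(d_model=64)      # 'd_model' is a recognized model key per the docs above
lss.fit(X, y, family="normal")     # "normal" is an assumed example family
predictions = lss.predict(X)
results = lss.evaluate(X, y)       # metrics tailored to the distribution type, per the table above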

class mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier(**kwargs)[source]

Provides a scikit-learn-like interface for the ProteinMambularClassifier, making it compatible with scikit-learn’s utilities and workflow. This class encapsulates the PyTorch Lightning model, preprocessing, and data loading, offering methods for fitting, predicting, and probability estimation in a manner akin to scikit-learn’s API.

Parameters:
**kwargs – Configuration parameters that can include both MambularConfig settings and preprocessing options. Any unrecognized parameters are passed to the preprocessor.

config
Configuration object for the model, storing architecture-specific parameters.
Type: MambularConfig

preprocessor
Object handling data preprocessing steps such as feature encoding and normalization.
Type: Preprocessor

model
The underlying neural network model, instantiated during the fit method.
Type: ProteinMambularClassifier

Methods


evaluate(X, y_true[, metrics])

Evaluate the model on the given data using specified metrics.

fit(X, y[, val_size, X_val, y_val, ...])

Fits the model to the given dataset.

get_metadata_routing()

Get metadata routing of this object.

get_params([deep])

Get parameters for this estimator.

predict(X)

Predict the class labels for the given input samples.

predict_proba(X)

Predict class probabilities for the given input samples.

preprocess_data(X_train, y_train, X_val, ...)

Preprocess the training and validation data and create corresponding DataLoaders.

preprocess_test_data(X)

Preprocesses the test data and creates tensors for categorical and numerical features.

set_fit_request(*[, X_val, batch_size, ...])

Request metadata passed to the fit method.

set_params(**parameters)

Set the parameters of this estimator.

split_data(X, y, val_size, random_state)

Split the dataset into training and validation sets.
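A brief usage sketch covering the fit, probability estimation, and parameter-access methods listed above. X and y are placeholder protein-sequence features and labels.

from mambular.models.sklearn_embedding_classifier import EmbeddingMambularClassifier

# X: protein sequence features, y: class labels; placeholders.
clf = EmbeddingMambularClassifier()
clf.fit(X, y)
proba = clf.predict_proba(X)
params = clf.get_params()     # sklearn-style parameter access, per the method table above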

class mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor(**kwargs)[source]

An sklearn-like interface for the ProteinMambularRegressor, making it compatible with sklearn’s utilities and workflows. This class wraps the PyTorch Lightning model and preprocessor, providing methods for fitting, predicting, and setting/getting parameters in a way that mimics sklearn’s API.

Parameters:
**kwargs – Keyword arguments that can include both configuration parameters for the MambularConfig and parameters for the preprocessor.

config
Configuration object containing model-specific parameters.
Type: MambularConfig

preprocessor
Preprocessor object for data preprocessing steps.
Type: Preprocessor

model
The neural network model, initialized after the fit method is called.
Type: ProteinMambularRegressor

Methods


evaluate(X, y_true[, metrics])

Evaluate the model on the given data using specified metrics.

fit(X, y[, val_size, X_val, y_val, ...])

Fits the ProteinMambularRegressor model to the training data.

get_metadata_routing()

Get metadata routing of this object.

get_params([deep])

Get parameters for this estimator.

predict(X)

Predicts target values for the given input samples.

preprocess_data(X_train, y_train, X_val, ...)

Preprocesses the training and validation data, and creates DataLoaders for them.

preprocess_test_data(X)

Preprocesses the test data and creates tensors for categorical and numerical features.

set_fit_request(*[, X_val, batch_size, ...])

Request metadata passed to the fit method.

set_params(**parameters)

Set the parameters of this estimator.

split_data(X, y, val_size, random_state)

Splits the dataset into training and validation sets.
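A sketch of fitting with an explicit validation set, which the fit signature above supports. X, y, X_val, and y_val are placeholder protein-sequence features and targets.

from mambular.models.sklearn_embedding_regressor import EmbeddingMambularRegressor

# X, y and X_val, y_val are placeholders.
reg = EmbeddingMambularRegressor()
reg.fit(X, y, X_val=X_val, y_val=y_val)   # explicit validation data, per the fit signature above
y_pred = reg.predict(X)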

class mambular.models.sklearn_regressor.MambularRegressor(**kwargs)[source]

A regressor implemented using PyTorch Lightning that follows the scikit-learn API conventions. This class is designed to work with tabular data, offering a straightforward way to specify model configurations and preprocessing steps. It integrates seamlessly with scikit-learn’s tools such as cross-validation and grid search.

Parameters:
**kwargs (Various) – Accepts any number of keyword arguments. Arguments recognized as model configuration options are passed to the MambularConfig constructor. Remaining arguments are assumed to be preprocessor options and passed to the Preprocessor constructor.

config
An object storing the configuration settings for the model.
Type: MambularConfig

preprocessor
An object responsible for preprocessing the input data, such as encoding categorical variables and scaling numerical features.
Type: Preprocessor

model
The underlying regression model, which is a PyTorch Lightning module. It is instantiated when the fit method is called.
Type: BaseMambularRegressor or None

Methods


evaluate(X, y_true[, metrics])

Evaluate the model on the given data using specified metrics.

fit(X, y[, val_size, X_val, y_val, ...])

Trains the regression model using the provided training data.

get_metadata_routing()

Get metadata routing of this object.

get_params([deep])

Get parameters for this estimator.

predict(X)

Predicts target values for the given input samples.

preprocess_data(X_train, y_train, X_val, ...)

Preprocesses the training and validation data, and creates DataLoaders for them.

preprocess_test_data(X)

Preprocesses the test data and creates tensors for categorical and numerical features.

set_fit_request(*[, X_val, batch_size, ...])

Request metadata passed to the fit method.

set_params(**parameters)

Set the parameters of this estimator.

split_data(X, y, val_size, random_state)

Splits the dataset into training and validation sets.

-
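A minimal usage sketch, assuming X_train, y_train and X_test are placeholders for your own tabular data; the keyword arguments shown are taken from the constructor and fit signatures above:

from mambular.models.sklearn_regressor import MambularRegressor

# Sketch only: hyperparameter values are illustrative.
model = MambularRegressor(d_model=64, n_layers=4)
model.fit(X_train, y_train, val_size=0.2)
y_pred = model.predict(X_test)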
- -
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/api/models/index.html b/docs/$READTHEDOCS_OUTPUT/html/api/models/index.html deleted file mode 100644 index 3842b99..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/api/models/index.html +++ /dev/null @@ -1,158 +0,0 @@ - - - - - - - Models — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

Models

-

This module provides classes for the Mambular models that adhere to scikit-learn’s BaseEstimator interface.


Functionality

Description

MambularClassifier

Multi-class and binary classification tasks.

MambularLSS

Various statistical distribution families for different types of regression and classification tasks.

EmbeddingMambularClassifier

Specialized classification module for complex protein sequence data.

EmbeddingMambularRegressor

Specialized regression module for complex protein sequence data.

MambularRegressor

Regression tasks.

-
-
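The table above corresponds to the following classes and module paths as they appear in the API reference; elsewhere in these docs, MambularClassifier and MambularLSS are also imported directly from mambular.models:

from mambular.models.sklearn_classifier import MambularClassifier
from mambular.models.sklearn_distributional import MambularLSS
from mambular.models.sklearn_embedding_classifier import EmbeddingMambularClassifier
from mambular.models.sklearn_embedding_regressor import EmbeddingMambularRegressor
from mambular.models.sklearn_regressor import MambularRegressor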
-
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/genindex.html b/docs/$READTHEDOCS_OUTPUT/html/genindex.html deleted file mode 100644 index e8099a1..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/genindex.html +++ /dev/null @@ -1,383 +0,0 @@ - - - - - - Index — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/index.html b/docs/$READTHEDOCS_OUTPUT/html/index.html deleted file mode 100644 index a81c759..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/index.html +++ /dev/null @@ -1,261 +0,0 @@ - - - - - - - Mamba-Tabluar — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

Mamba-Tabular

-

Mambular is a Python package that brings the power of Mamba architectures to tabular data, offering a suite of deep learning models for regression, classification, and distributional regression tasks. Designed with ease of use in mind, Mambular models adhere to scikit-learn’s BaseEstimator interface, making them highly compatible with the familiar scikit-learn ecosystem. This means you can fit, predict, and transform using Mambular models just as you would with any traditional scikit-learn model, but with the added performance and flexibility of deep learning.

-
-

Features

-
    -
  • Comprehensive Model Suite: Includes modules for regression (MambularRegressor), classification (MambularClassifier), and distributional regression (MambularLSS), catering to a wide range of tabular data tasks.

  • -
  • State-of-the-Art Architectures: Leverages the Mamba architecture, known for its effectiveness in handling sequential and time-series data within a state-space modeling framework, adapted here for tabular data.

  • -
  • Seamless Integration: Designed to work effortlessly with scikit-learn, allowing for easy inclusion in existing machine learning pipelines, cross-validation, and hyperparameter tuning workflows.

  • -
  • Extensive Preprocessing: Comes with a powerful preprocessing module that supports a broad array of data transformation techniques, ensuring that your data is optimally prepared for model training.

  • -
  • Sklearn-like API: The familiar scikit-learn fit, predict, and predict_proba methods mean minimal learning curve for those already accustomed to scikit-learn.

  • -
  • PyTorch Lightning Under the Hood: Built on top of PyTorch Lightning, Mambular models benefit from streamlined training processes, easy customization, and advanced features like distributed training and 16-bit precision.

  • -
-
-
-

Installation

-

Install Mambular using pip:

-
pip install mambular
-
-
-
-
-

Advanced Preprocessing for Optimal Performance

-

Mambular elevates the preprocessing stage of model development, employing a sophisticated suite of techniques to ensure your data is in the best shape for the Mamba architectures. Our preprocessing module is designed to be both powerful and intuitive, offering a range of options to transform your tabular data efficiently.

-
-

Intelligent Data Type Detection and Transformation

-

Mambular automatically identifies the type of each feature in your dataset, applying the most suitable transformations to numerical and categorical variables. This includes:

-
  • Ordinal Encoding: Categorical features are seamlessly transformed into numerical values, preserving their inherent order and making them model-ready.

  • One-Hot Encoding: For nominal data, Mambular employs one-hot encoding to capture the presence or absence of categories without imposing ordinality.

  • Binning: Numerical features can be discretized into bins, a useful technique for handling continuous variables in certain modeling contexts.

  • Decision Tree Binning: Optionally, Mambular can use decision trees to find the optimal binning strategy for numerical features, enhancing model interpretability and performance.

  • Normalization: Mambular can also handle numerical features without turning them into categorical features; standard preprocessing steps such as per-feature normalization are supported.

  • Standardization: Similarly, standardization can be used instead of normalization.
-
-
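As a concrete illustration, the numerical preprocessing can be chosen when a model is constructed. The numerical_preprocessing keyword is the one used in the fitting example further below; the value "binning" is an assumption made here purely for illustration and may differ from the option names actually accepted by the preprocessor:

from mambular.models import MambularRegressor

# Sketch only: "binning" is an assumed option value.
model = MambularRegressor(
    d_model=64,
    n_layers=4,
    numerical_preprocessing="binning",
)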
-

Handling Missing Values

-

Our preprocessing pipeline gracefully handles missing data, employing strategies like mean imputation for numerical features and mode imputation for categorical ones, ensuring that your models receive complete data inputs without manual intervention.

-
-
-

Flexible and Customizable

-

While Mambular excels in automating the preprocessing workflow, it also offers flexibility. You can customize the preprocessing steps to fit the unique needs of your dataset, ensuring that you’re not locked into a one-size-fits-all approach.

-

By integrating Mambular’s preprocessing module into your workflow, you’re not just preparing your data for deep learning; you’re optimizing it for excellence. This commitment to data quality is what sets Mambular apart, making it an indispensable tool in your machine learning arsenal.

-
-
-
-

Fit a Model

-

Fitting a model in mambular is straightforward. All models in mambular are sklearn BaseEstimators, so the .fit method is implemented for each of them. This also makes the models usable with sklearn’s other facilities, such as its built-in hyperparameter optimization tools.

-
from mambular.models import MambularClassifier
-# Initialize and fit your model
-model = MambularClassifier(
-    dropout=0.01,
-    d_model=128,
-    n_layers=6,
-    numerical_preprocessing="normalization",
-)
-
-# X can be a pd.DataFrame, or anything that can easily be converted into one (e.g. a np.array)
-model.fit(X, y, max_epochs=500, lr=1e-03, patience=25)
-
-
-
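Because the estimators expose get_params and set_params, they can in principle be combined with scikit-learn’s model selection utilities. The following is a minimal sketch; the parameter names come from the constructor example above, while full compatibility with GridSearchCV (cloning and refitting) is assumed rather than documented here:

from sklearn.model_selection import GridSearchCV
from mambular.models import MambularClassifier

# Hypothetical grid over constructor options used in the example above.
param_grid = {
    "dropout": [0.01, 0.1],
    "n_layers": [4, 6],
}

search = GridSearchCV(
    MambularClassifier(),
    param_grid,
    cv=3,
    scoring="accuracy",  # score via predict() rather than a model-specific score method
)
search.fit(X, y)
print(search.best_params_)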

Predictions are also easily obtained:

-
# simple predictions
-preds = model.predict(X)
-
-# Predict probabilities
-preds = model.predict_proba(X)
-
-
-
-
-

Distributional Regression with MambularLSS

-

Mambular introduces a cutting-edge approach to distributional regression through its MambularLSS module, empowering users to model the full distribution of a response variable, not just its mean. This method is particularly valuable in scenarios where understanding the variability, skewness, or kurtosis of the response distribution is as crucial as predicting its central tendency.

-
-

Key Features of MambularLSS:

-
    -
  • Full Distribution Modeling: Unlike traditional regression models that predict a single value (e.g., the mean), MambularLSS models the entire distribution of the response variable. This allows for more informative predictions, including quantiles, variance, and higher moments.

  • -
  • Customizable Distribution Types: MambularLSS supports a variety of distribution families (e.g., Gaussian, Poisson, Binomial), making it adaptable to different types of response variables, from continuous to count data.

  • -
  • Location, Scale, Shape Parameters: The model predicts parameters corresponding to the location, scale, and shape of the distribution, offering a nuanced understanding of the data’s underlying distributional characteristics.

  • -
  • Enhanced Predictive Uncertainty: By modeling the full distribution, MambularLSS provides richer information on predictive uncertainty, enabling more robust decision-making processes in uncertain environments.

  • -
-
-
-

Available Distribution Classes:

-

MambularLSS offers a wide range of distribution classes to cater to various statistical modeling needs. The available distribution classes include:

-
    -
  • normal: Normal Distribution for modeling continuous data with a symmetric distribution around the mean.

  • -
  • poisson: Poisson Distribution for modeling count data that represents the number of events occurring within a fixed interval.

  • -
  • gamma: Gamma Distribution for modeling continuous data that is skewed and bounded at zero, often used for waiting times.

  • -
  • beta: Beta Distribution for modeling data that is bounded between 0 and 1, useful for proportions and percentages.

  • -
  • dirichlet: Dirichlet Distribution for modeling multivariate data where individual components are correlated, and the sum is constrained to 1.

  • -
  • studentt: Student’s T-Distribution for modeling data with heavier tails than the normal distribution, useful when the sample size is small.

  • -
  • negativebinom: Negative Binomial Distribution for modeling count data with over-dispersion relative to the Poisson distribution.

  • -
  • inversegamma: Inverse Gamma Distribution, often used as a prior distribution in Bayesian inference for scale parameters.

  • -
  • categorical: Categorical Distribution for modeling categorical data with more than two categories.

  • -
-

These distribution classes allow MambularLSS to flexibly model a wide variety of data types and distributions, providing users with the tools needed to capture the full complexity of their data.

-
-
-

Use Cases for MambularLSS:

-
    -
  • Risk Assessment: In finance or insurance, understanding the range and likelihood of potential losses is as important as predicting average outcomes.

  • -
  • Demand Forecasting: For inventory management, capturing the variability in product demand helps in optimizing stock levels.

  • -
  • Personalized Medicine: In healthcare, distributional regression can predict a range of possible patient responses to a treatment, aiding in personalized therapy planning.

  • -
-
-
-

Getting Started with MambularLSS:

-

To integrate distributional regression into your workflow with MambularLSS, start by initializing the model with your desired configuration, similar to other Mambular models:

-
from mambular.models import MambularLSS
-
-# Initialize the MambularLSS model
-model = MambularLSS(
-    dropout=0.2,
-    d_model=256,
-    n_layers=4,
-
-)
-
-# Fit the model to your data
-model.fit(
-    X,
-    y,
-    max_epochs=300,
-    lr=1e-03,
-    patience=10,
-    family="normal" # define your distribution
-    )
-
-
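The same call pattern should carry over to the other families listed above; for count-valued targets, for example, one would presumably pass the poisson family instead (sketch with placeholder data):

from mambular.models import MambularLSS

model = MambularLSS(dropout=0.2, d_model=256, n_layers=4)
model.fit(
    X,
    y,
    max_epochs=300,
    lr=1e-03,
    patience=10,
    family="poisson",  # any of the families listed above
)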
-
-
-
-
-
-
-
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/installation.html b/docs/$READTHEDOCS_OUTPUT/html/installation.html deleted file mode 100644 index 682059f..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/installation.html +++ /dev/null @@ -1,136 +0,0 @@ - - - - - - - Installation — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

Installation

-

Please follow the steps below to install mambular.

-

Install from source:

-
cd mamba-tabular
-pip install .
-
-
-

Note: Make sure you are in the same directory where the setup.py file resides.

-

So far, this package is not available on PyPI.

-
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/mamba.html b/docs/$READTHEDOCS_OUTPUT/html/mamba.html deleted file mode 100644 index b4956ff..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/mamba.html +++ /dev/null @@ -1,278 +0,0 @@ - - - - - - - Mamba-Tabluar — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/objects.inv b/docs/$READTHEDOCS_OUTPUT/html/objects.inv deleted file mode 100644 index 4ffc244..0000000 Binary files a/docs/$READTHEDOCS_OUTPUT/html/objects.inv and /dev/null differ diff --git a/docs/$READTHEDOCS_OUTPUT/html/quickstart.html b/docs/$READTHEDOCS_OUTPUT/html/quickstart.html deleted file mode 100644 index 16cfac7..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/quickstart.html +++ /dev/null @@ -1,135 +0,0 @@ - - - - - - - Quickstart — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
- -
-
-
-
- -
-

Quickstart

-

Example code for implementing the models:

-
  1. Classification

  2. Distributional

  3. Embedding Regression

  4. Regression
-
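As a minimal example in the spirit of the first item, a classification quickstart might look like the following sketch, where X and y are placeholders for a feature DataFrame and a label vector and the hyperparameters are illustrative:

from mambular.models import MambularClassifier

model = MambularClassifier(d_model=64, n_layers=4)
model.fit(X, y, max_epochs=100)
probabilities = model.predict_proba(X)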
- - -
-
- -
-
-
-
- - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/search.html b/docs/$READTHEDOCS_OUTPUT/html/search.html deleted file mode 100644 index 5ca01e1..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/search.html +++ /dev/null @@ -1,139 +0,0 @@ - - - - - - Search — mamba-tabular 06.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - \ No newline at end of file diff --git a/docs/$READTHEDOCS_OUTPUT/html/searchindex.js b/docs/$READTHEDOCS_OUTPUT/html/searchindex.js deleted file mode 100644 index a45349e..0000000 --- a/docs/$READTHEDOCS_OUTPUT/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({"docnames": ["api/base_models/BaseModels", "api/base_models/index", "api/models/Models", "api/models/index", "index", "installation", "mamba", "quickstart"], "filenames": ["api\\base_models\\BaseModels.rst", "api\\base_models\\index.rst", "api\\models\\Models.rst", "api\\models\\index.rst", "index.rst", "installation.md", "mamba.md", "quickstart.md"], "titles": ["Base Models", "BaseModels", "Models", "Models", "Mamba-Tabluar", "Installation", "Mamba-Tabluar", "Quickstart"], "terms": {"class": [0, 1, 2, 3], "mambular": [0, 1, 2, 3, 4, 5, 6], "base_model": 0, "classifi": [0, 2], "basemambularclassifi": [0, 1, 2], "num_class": 0, "config": [0, 2], "cat_feature_info": 0, "none": [0, 2], "num_feature_info": 0, "lr": [0, 4, 6], "0": [0, 4, 6], "001": 0, "lr_patienc": 0, "10": [0, 4, 6], "weight_decai": 0, "025": 0, "lr_factor": 0, "75": 0, "sourc": [0, 2, 5], "A": [0, 2], "build": 0, "classif": [0, 1, 3, 4, 6, 7], "us": [0, 2], "architectur": [0, 2, 4, 6], "within": [0, 4, 6], "pytorch": [0, 2, 4, 6], "lightn": [0, 2, 4, 6], "framework": [0, 4, 6], "thi": [0, 1, 2, 3, 4, 5, 6], "integr": [0, 2, 4, 6], "variou": [0, 1, 2, 3, 4, 6], "compon": [0, 2, 4, 6], "embed": [0, 7], "categor": [0, 2, 4, 6], "numer": [0, 2, 4, 6], "featur": [0, 2], "process": [0, 4, 6], "sequenc": [0, 1, 3], "head": 0, "predict": [0, 2, 4, 6], "It": [0, 2], "support": [0, 4, 6], "multi": [0, 1, 3], "binari": [0, 1, 3], "task": [0, 1, 3, 4, 6], "paramet": [0, 2, 4, 6], "int": 0, "The": [0, 2, 4, 6], "number": [0, 2, 4, 6], "For": [0, 4, 6], "should": 0, "2": [0, 4, 6], "mambularconfig": [0, 2], "an": [0, 2, 4, 6], "instanc": 0, "contain": [0, 2], "configur": [0, 2, 4, 6], "dict": 0, "option": [0, 2, 4, 6], "dictionari": 0, "map": 0, "name": 0, "uniqu": [0, 4, 6], "categori": [0, 4, 6], "inform": [0, 4, 6], "i": [0, 2, 4, 5, 6], "layer": 0, "default": 0, "size": [0, 4, 6], "input": [0, 2, 4, 6], "dimens": 0, "float": 0, "learn": [0, 2, 3, 4, 6], "rate": 0, "optim": 0, "1e": [0, 4, 6], "03": [0, 4, 6], "epoch": 0, "improv": 0, "after": [0, 2], "which": [0, 2], "reduc": 0, "weight": 0, "decai": 0, "l2": 0, "penalti": 0, "factor": 0, "embedding_activ": 0, "activ": 0, "function": [0, 1, 3], "appli": [0, 4, 6], "linear": 0, "transform": 0, "type": [0, 1, 2, 3], "nn": [0, 2], "modul": [0, 1, 2, 3, 4, 6], "num_embed": 0, "list": [0, 2], "sequenti": [0, 4, 6], "each": [0, 4, 6], "correspond": [0, 4, 6], "modulelist": 0, "cat_embed": 0, "mamba": [0, 5], "norm_f": 0, "normal": [0, 2, 4, 6], "tabular_head": 0, "label": 0, "from": [0, 4, 5, 6], "aggreg": 0, "represent": 0, "pooling_method": 0, "method": [0, 2, 4, 6], "across": 0, "ar": [0, 2, 4, 6], "avg": 0, "max": 0, "sum": [0, 4, 6], "str": 0, "loss_fct": 0, "loss": [0, 4, 6], "train": [0, 2, 4, 6], "acc": 0, "metric": 0, "comput": 0, "accuraci": 0, "torchmetr": 0, "auroc": 0, "area": 0, "under": [0, 4, 6], "receiv": [0, 4, 6], "oper": 0, "characterist": [0, 4, 6], "curv": [0, 4, 6], "precis": [0, 4, 6], "forward": 0, "cat_featur": 0, "num_featur": 0, "defin": [0, 4, 6], "pass": [0, 2], "both": [0, 2, 4, 6], "produc": 0, "training_step": 0, "batch": 0, "batch_idx": 0, "perform": 0, "singl": [0, 4, 6], "step": [0, 2, 4, 5, 6], "log": 0, "set": [0, 2, 4, 6], "validation_step": 0, "valid": [0, 2, 
4, 6], "configure_optim": 0, "": [0, 2, 3, 4, 6], "schedul": 0, "attribut": 0, "automatic_optim": 0, "If": 0, "fals": 0, "you": [0, 4, 5, 6], "respons": [0, 2, 4, 6], "call": [0, 2], "backward": 0, "zero_grad": 0, "current_epoch": 0, "current": 0, "trainer": 0, "attach": 0, "devic": 0, "dtype": 0, "example_input_arrai": 0, "exampl": [0, 7], "arrai": [0, 4, 6], "specif": [0, 2, 4, 6], "what": [0, 4, 6], "can": [0, 2, 4, 6], "consum": 0, "fabric": 0, "global_rank": 0, "index": 0, "all": [0, 4, 6], "node": 0, "global_step": 0, "total": 0, "seen": 0, "hparam": 0, "collect": 0, "hyperparamet": [0, 4, 6], "save": 0, "save_hyperparamet": 0, "hparams_initi": 0, "local_rank": 0, "logger": 0, "refer": 0, "object": [0, 2], "on_gpu": 0, "return": 0, "true": 0, "locat": [0, 4, 6], "gpu": 0, "strict_load": 0, "determin": 0, "how": 0, "load": [0, 2], "load_state_dict": 0, "strict": 0, "__call__": 0, "up": 0, "provid": [0, 1, 2, 3, 4, 6], "lr_schedul": 0, "tensor": 0, "output": 0, "dure": [0, 2], "tupl": 0, "data": [0, 1, 2, 3], "dataload": 0, "distribut": [0, 1, 3, 7], "basemambularlss": [0, 1], "famili": [0, 1, 3, 4, 6], "distribution_param": 0, "likelihood": [0, 4, 6], "statist": [0, 1, 3, 4, 6], "lss": 0, "built": [0, 4, 6], "tabular": [0, 2, 4, 5, 6], "design": [0, 2, 4, 6], "accommod": 0, "differ": [0, 1, 3, 4, 6], "regress": [0, 1, 2, 3, 7], "includ": [0, 2, 4, 6], "poisson": [0, 4, 6], "gamma": [0, 4, 6], "beta": [0, 4, 6], "dirichlet": [0, 4, 6], "studentt": [0, 4, 6], "negativebinom": [0, 4, 6], "inversegamma": [0, 4, 6], "initi": [0, 2, 4, 6], "coeffici": 0, "addit": [0, 2], "chosen": 0, "core": 0, "neural": [0, 2], "network": [0, 2], "implement": [0, 2, 4, 6, 7], "block": 0, "final": 0, "deriv": 0, "callabl": 0, "typic": 0, "embedding_classifi": 0, "baseembeddingmambularclassifi": 0, "seq_siz": 0, "20": 0, "raw_embed": 0, "special": [0, 1, 3], "protein": [0, 1, 3], "raw": 0, "make": [0, 2, 4, 5, 6], "suitabl": [0, 4, 6], "complex": [0, 1, 3, 4, 6], "about": 0, "regular": 0, "chunk": 0, "relev": 0, "when": [0, 2, 4, 6], "bool": 0, "indic": 0, "whether": 0, "directli": 0, "them": [0, 4, 6], "target": 0, "depend": 0, "embedding_regressor": 0, "baseembeddingmambularregressor": [0, 1], "mseloss": 0, "regressor": [0, 2], "basemambularregressor": [0, 1, 2], "incorpor": [0, 2], "train_ms": 0, "mean": [0, 4, 6], "squar": 0, "error": 0, "meansquarederror": 0, "val_ms": 0, "torch": [0, 2], "base": [1, 2], "model": [1, 7], "descript": [1, 3], "embeddingmambularclassifi": [1, 2, 3], "sklearn_classifi": 2, "mambularclassifi": [2, 3, 4, 6], "kwarg": 2, "mimic": 2, "scikit": [2, 3, 4, 6], "api": [2, 4, 6], "custom": [2, 4, 6], "work": [2, 4, 6], "flexibl": 2, "interfac": [2, 3, 4, 6], "specifi": 2, "preprocess": 2, "smoothli": 2, "util": 2, "cross": [2, 4, 6], "grid": 2, "search": 2, "accept": 2, "ani": [2, 4, 6], "keyword": 2, "argument": 2, "preprocessor": 2, "known": [2, 4, 6], "extract": 2, "predefin": 2, "rest": 2, "hold": 2, "handl": 2, "like": [2, 4, 6], "encod": [2, 4, 6], "underli": [2, 4, 6], "instanti": 2, "upon": 2, "fit": 2, "sklearn_distribut": 2, "mambularlss": [2, 3], "machin": [2, 4, 6], "estim": 2, "structur": 2, "deep": [2, 4, 6], "facilit": 2, "end": 2, "workflow": [2, 4, 6], "separ": 2, "allow": [2, 4, 6], "adjust": 2, "arbitrari": 2, "divid": 2, "recogn": 2, "kei": 2, "d_model": [2, 4, 6], "n_layer": [2, 4, 6], "etc": 2, "assum": 2, "sklearn_embedding_classifi": 2, "proteinmambularclassifi": 2, "compat": [2, 4, 6], "encapsul": 2, "offer": [2, 4, 6], "probabl": [2, 4, 6], "manner": 2, 
"akin": 2, "unrecogn": 2, "store": 2, "sklearn_embedding_regressor": 2, "embeddingmambularregressor": [2, 3], "sklearn": [2, 4, 6], "proteinmambularregressor": 2, "wrap": 2, "get": 2, "wai": 2, "sklearn_regressor": 2, "mambularregressor": [2, 3, 4, 6], "follow": [2, 5], "convent": 2, "straightforward": 2, "seamlessli": [2, 4, 6], "tool": [2, 4, 6], "constructor": 2, "remain": 2, "variabl": [2, 4, 6], "scale": [2, 4, 6], "adher": [3, 4, 6], "baseestim": [3, 4, 6], "python": [4, 6], "packag": [4, 5, 6], "bring": [4, 6], "power": [4, 6], "suit": [4, 6], "eas": [4, 6], "mind": [4, 6], "highli": [4, 6], "familiar": [4, 6], "ecosystem": [4, 6], "just": [4, 6], "would": [4, 6], "tradit": [4, 6], "ad": [4, 6], "comprehens": [4, 6], "cater": [4, 6], "wide": [4, 6], "rang": [4, 6], "state": [4, 6], "art": [4, 6], "leverag": [4, 6], "its": [4, 6], "effect": [4, 6], "time": [4, 6], "seri": [4, 6], "space": [4, 6], "adapt": [4, 6], "here": [4, 6], "seamless": [4, 6], "effortlessli": [4, 6], "easi": [4, 6], "inclus": [4, 6], "exist": [4, 6], "pipelin": [4, 6], "tune": [4, 6], "extens": [4, 6], "come": [4, 6], "broad": [4, 6], "techniqu": [4, 6], "ensur": [4, 6], "your": [4, 6], "prepar": [4, 6], "predict_proba": [4, 6], "minim": [4, 6], "those": [4, 6], "alreadi": [4, 6], "accustom": [4, 6], "hood": [4, 6], "top": [4, 6], "benefit": [4, 6], "streamlin": [4, 6], "16": [4, 6], "bit": [4, 6], "pip": [4, 5, 6], "elev": [4, 6], "stage": [4, 6], "develop": [4, 6], "emploi": [4, 6], "sophist": [4, 6], "best": [4, 6], "shape": [4, 6], "our": [4, 6], "intuit": [4, 6], "effici": [4, 6], "automat": [4, 6], "identifi": [4, 6], "dataset": [4, 6], "most": [4, 6], "ordin": [4, 6], "preserv": [4, 6], "inher": [4, 6], "order": [4, 6], "readi": [4, 6], "One": [4, 6], "hot": [4, 6], "nomin": [4, 6], "one": [4, 6], "captur": [4, 6], "presenc": [4, 6], "absenc": [4, 6], "without": [4, 6], "impos": [4, 6], "bin": [4, 6], "discret": [4, 6], "continu": [4, 6], "certain": [4, 6], "context": [4, 6], "decis": [4, 6], "tree": [4, 6], "find": [4, 6], "strategi": [4, 6], "enhanc": [4, 6], "interpret": [4, 6], "easili": [4, 6], "turn": [4, 6], "standard": [4, 6], "per": [4, 6], "possibl": [4, 6], "similarli": [4, 6], "instead": [4, 6], "gracefulli": [4, 6], "imput": [4, 6], "mode": [4, 6], "ones": [4, 6], "complet": [4, 6], "manual": [4, 6], "intervent": [4, 6], "while": [4, 6], "excel": [4, 6], "autom": [4, 6], "also": [4, 6], "need": [4, 6], "re": [4, 6], "lock": [4, 6], "approach": [4, 6], "By": [4, 6], "commit": [4, 6], "qualiti": [4, 6], "apart": [4, 6], "indispens": [4, 6], "arsen": [4, 6], "simpl": [4, 6], "thu": [4, 6], "addition": [4, 6], "other": [4, 6], "import": [4, 6], "dropout": [4, 6], "01": [4, 6], "128": [4, 6], "6": [4, 6], "numerical_preprocess": [4, 6], "x": [4, 6], "datafram": [4, 6], "someth": [4, 6], "pd": [4, 6], "np": [4, 6], "y": [4, 6], "max_epoch": [4, 6], "500": [4, 6], "patienc": [4, 6], "25": [4, 6], "obtain": [4, 6], "pred": [4, 6], "introduc": [4, 6], "cut": [4, 6], "edg": [4, 6], "through": [4, 6], "empow": [4, 6], "user": [4, 6], "full": [4, 6], "particularli": [4, 6], "valuabl": [4, 6], "scenario": [4, 6], "where": [4, 5, 6], "understand": [4, 6], "skew": [4, 6], "kurtosi": [4, 6], "crucial": [4, 6], "central": [4, 6], "tendenc": [4, 6], "unlik": [4, 6], "e": [4, 6], "g": [4, 6], "entir": [4, 6], "more": [4, 6], "quantil": [4, 6], "varianc": [4, 6], "higher": [4, 6], "moment": [4, 6], "varieti": [4, 6], "gaussian": [4, 6], "binomi": [4, 6], "count": [4, 6], "nuanc": [4, 6], "uncertainti": [4, 6], 
"richer": [4, 6], "enabl": [4, 6], "robust": [4, 6], "uncertain": [4, 6], "environ": [4, 6], "symmetr": [4, 6], "around": [4, 6], "repres": [4, 6], "event": [4, 6], "occur": [4, 6], "fix": [4, 6], "interv": [4, 6], "bound": [4, 6], "zero": [4, 6], "often": [4, 6], "wait": [4, 6], "between": [4, 6], "1": [4, 6], "proport": [4, 6], "percentag": [4, 6], "multivari": [4, 6], "individu": [4, 6], "correl": [4, 6], "constrain": [4, 6], "student": [4, 6], "t": [4, 6], "heavier": [4, 6], "tail": [4, 6], "than": [4, 6], "sampl": [4, 6], "small": [4, 6], "neg": [4, 6], "over": [4, 6], "dispers": [4, 6], "rel": [4, 6], "invers": [4, 6], "prior": [4, 6], "bayesian": [4, 6], "infer": [4, 6], "two": [4, 6], "These": [4, 6], "flexibli": [4, 6], "risk": [4, 6], "assess": [4, 6], "In": [4, 6], "financ": [4, 6], "insur": [4, 6], "potenti": [4, 6], "averag": [4, 6], "outcom": [4, 6], "demand": [4, 6], "forecast": [4, 6], "inventori": [4, 6], "manag": [4, 6], "product": [4, 6], "help": [4, 6], "stock": [4, 6], "level": [4, 6], "person": [4, 6], "medicin": [4, 6], "healthcar": [4, 6], "patient": [4, 6], "treatment": [4, 6], "aid": [4, 6], "therapi": [4, 6], "plan": [4, 6], "To": [4, 6], "desir": [4, 6], "similar": [4, 6], "256": [4, 6], "4": [4, 6], "300": [4, 6], "pleas": 5, "below": 5, "cd": 5, "note": 5, "sure": 5, "same": 5, "directori": 5, "setup": 5, "py": 5, "file": 5, "resid": 5, "so": 5, "far": 5, "avail": 5, "pypi": 5, "code": 7}, "objects": {"mambular.base_models.classifier": [[0, 0, 1, "", "BaseMambularClassifier"]], "mambular.base_models.classifier.BaseMambularClassifier": [[0, 1, 1, "", "acc"], [0, 1, 1, "", "auroc"], [0, 1, 1, "", "cat_embeddings"], [0, 2, 1, "id0", "configure_optimizers"], [0, 1, 1, "", "embedding_activation"], [0, 2, 1, "id1", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "num_embeddings"], [0, 1, 1, "", "pooling_method"], [0, 1, 1, "", "precision"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id2", "training_step"], [0, 2, 1, "id3", "validation_step"]], "mambular.base_models.distributional": [[0, 0, 1, "", "BaseMambularLSS"]], "mambular.base_models.distributional.BaseMambularLSS": [[0, 2, 1, "id4", "configure_optimizers"], [0, 2, 1, "id5", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id6", "training_step"], [0, 2, 1, "id7", "validation_step"]], "mambular.base_models.embedding_classifier": [[0, 0, 1, "", "BaseEmbeddingMambularClassifier"]], "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier": [[0, 2, 1, "id8", "configure_optimizers"], [0, 2, 1, "id9", "forward"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id10", "training_step"], [0, 2, 1, "id11", "validation_step"]], "mambular.base_models.embedding_regressor": [[0, 0, 1, "", "BaseEmbeddingMambularRegressor"]], "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor": [[0, 2, 1, "id12", "configure_optimizers"], [0, 2, 1, "id13", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id14", "training_step"], [0, 2, 1, "id15", "validation_step"]], "mambular.base_models.regressor": [[0, 0, 1, "", "BaseMambularRegressor"]], "mambular.base_models.regressor.BaseMambularRegressor": [[0, 2, 1, "id16", "configure_optimizers"], [0, 2, 1, "id17", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 
1, "", "tabular_head"], [0, 1, 1, "", "train_mse"], [0, 2, 1, "id18", "training_step"], [0, 1, 1, "", "val_mse"], [0, 2, 1, "id19", "validation_step"]], "mambular.models.sklearn_classifier": [[2, 0, 1, "", "MambularClassifier"]], "mambular.models.sklearn_classifier.MambularClassifier": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_distributional": [[2, 0, 1, "", "MambularLSS"]], "mambular.models.sklearn_distributional.MambularLSS": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_embedding_classifier": [[2, 0, 1, "", "EmbeddingMambularClassifier"]], "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_embedding_regressor": [[2, 0, 1, "", "EmbeddingMambularRegressor"]], "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_regressor": [[2, 0, 1, "", "MambularRegressor"]], "mambular.models.sklearn_regressor.MambularRegressor": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]]}, "objtypes": {"0": "py:class", "1": "py:attribute", "2": "py:method"}, "objnames": {"0": ["py", "class", "Python class"], "1": ["py", "attribute", "Python attribute"], "2": ["py", "method", "Python method"]}, "titleterms": {"base": 0, "model": [0, 2, 3, 4, 6], "basemodel": 1, "mamba": [4, 6], "tabluar": [4, 6], "featur": [4, 6], "instal": [4, 5, 6], "advanc": [4, 6], "preprocess": [4, 6], "optim": [4, 6], "perform": [4, 6], "intellig": [4, 6], "data": [4, 6], "type": [4, 6], "detect": [4, 6], "transform": [4, 6], "handl": [4, 6], "miss": [4, 6], "valu": [4, 6], "flexibl": [4, 6], "customiz": [4, 6], "fit": [4, 6], "distribut": [4, 6], "regress": [4, 6], "mambularlss": [4, 6], "kei": [4, 6], "avail": [4, 6], "class": [4, 6], "us": [4, 6], "case": [4, 6], "get": [4, 6], "start": [4, 6], "quickstart": 7}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.viewcode": 1, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "nbsphinx": 4, "sphinx": 57}, "alltitles": {"Base Models": [[0, "base-models"]], "BaseModels": [[1, "basemodels"]], "Models": [[2, "models"], [3, "models"]], "Mamba-Tabluar": [[4, "mamba-tabluar"], [6, "mamba-tabluar"]], "Features": [[4, "features"], [6, "features"]], "Installation": [[4, "installation"], [5, "installation"], [6, "installation"]], "Advanced Preprocessing for Optimal Performance": [[4, "advanced-preprocessing-for-optimal-performance"], [6, "advanced-preprocessing-for-optimal-performance"]], "Intelligent Data Type Detection and Transformation": [[4, "intelligent-data-type-detection-and-transformation"], [6, "intelligent-data-type-detection-and-transformation"]], "Handling Missing Values": [[4, "handling-missing-values"], [6, "handling-missing-values"]], "Flexible and Customizable": [[4, "flexible-and-customizable"], [6, "flexible-and-customizable"]], "Fit a Model": [[4, "fit-a-model"], [6, "fit-a-model"]], "Distributional Regression with MambularLSS": [[4, "distributional-regression-with-mambularlss"], [6, 
"distributional-regression-with-mambularlss"]], "Key Features of MambularLSS:": [[4, "key-features-of-mambularlss"], [6, "key-features-of-mambularlss"]], "Available Distribution Classes:": [[4, "available-distribution-classes"], [6, "available-distribution-classes"]], "Use Cases for MambularLSS:": [[4, "use-cases-for-mambularlss"], [6, "use-cases-for-mambularlss"]], "Getting Started with MambularLSS:": [[4, "getting-started-with-mambularlss"], [6, "getting-started-with-mambularlss"]], "Quickstart": [[7, "quickstart"]]}, "indexentries": {"baseembeddingmambularclassifier (class in mambular.base_models.embedding_classifier)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier"]], "baseembeddingmambularregressor (class in mambular.base_models.embedding_regressor)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor"]], "basemambularclassifier (class in mambular.base_models.classifier)": [[0, "mambular.base_models.classifier.BaseMambularClassifier"]], "basemambularlss (class in mambular.base_models.distributional)": [[0, "mambular.base_models.distributional.BaseMambularLSS"]], "basemambularregressor (class in mambular.base_models.regressor)": [[0, "mambular.base_models.regressor.BaseMambularRegressor"]], "acc (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.acc"]], "auroc (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.auroc"]], "cat_embeddings (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.cat_embeddings"]], "configure_optimizers() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id0"], [0, "mambular.base_models.classifier.BaseMambularClassifier.configure_optimizers"]], "configure_optimizers() (mambular.base_models.distributional.basemambularlss method)": [[0, "id4"], [0, "mambular.base_models.distributional.BaseMambularLSS.configure_optimizers"]], "configure_optimizers() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id8"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.configure_optimizers"]], "configure_optimizers() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id12"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.configure_optimizers"]], "configure_optimizers() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id16"], [0, "mambular.base_models.regressor.BaseMambularRegressor.configure_optimizers"]], "embedding_activation (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.embedding_activation"]], "forward() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id1"], [0, "mambular.base_models.classifier.BaseMambularClassifier.forward"]], "forward() (mambular.base_models.distributional.basemambularlss method)": [[0, "id5"], [0, "mambular.base_models.distributional.BaseMambularLSS.forward"]], "forward() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id9"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.forward"]], "forward() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id13"], [0, 
"mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.forward"]], "forward() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id17"], [0, "mambular.base_models.regressor.BaseMambularRegressor.forward"]], "loss_fct (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.loss_fct"]], "loss_fct (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.loss_fct"]], "loss_fct (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.loss_fct"]], "loss_fct (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.loss_fct"]], "mamba (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.mamba"]], "mamba (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.mamba"]], "mamba (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.mamba"]], "mamba (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.mamba"]], "mamba (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.mamba"]], "norm_f (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.norm_f"]], "norm_f (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.norm_f"]], "norm_f (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.norm_f"]], "norm_f (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.norm_f"]], "norm_f (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.norm_f"]], "num_embeddings (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.num_embeddings"]], "pooling_method (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.pooling_method"]], "precision (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.precision"]], "tabular_head (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.tabular_head"]], "tabular_head (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.tabular_head"]], "tabular_head (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.tabular_head"]], "tabular_head 
(mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.tabular_head"]], "tabular_head (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.tabular_head"]], "train_mse (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.train_mse"]], "training_step() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id2"], [0, "mambular.base_models.classifier.BaseMambularClassifier.training_step"]], "training_step() (mambular.base_models.distributional.basemambularlss method)": [[0, "id6"], [0, "mambular.base_models.distributional.BaseMambularLSS.training_step"]], "training_step() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id10"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.training_step"]], "training_step() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id14"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.training_step"]], "training_step() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id18"], [0, "mambular.base_models.regressor.BaseMambularRegressor.training_step"]], "val_mse (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.val_mse"]], "validation_step() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id3"], [0, "mambular.base_models.classifier.BaseMambularClassifier.validation_step"]], "validation_step() (mambular.base_models.distributional.basemambularlss method)": [[0, "id7"], [0, "mambular.base_models.distributional.BaseMambularLSS.validation_step"]], "validation_step() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id11"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.validation_step"]], "validation_step() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id15"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.validation_step"]], "validation_step() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id19"], [0, "mambular.base_models.regressor.BaseMambularRegressor.validation_step"]], "embeddingmambularclassifier (class in mambular.models.sklearn_embedding_classifier)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier"]], "embeddingmambularregressor (class in mambular.models.sklearn_embedding_regressor)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor"]], "mambularclassifier (class in mambular.models.sklearn_classifier)": [[2, "mambular.models.sklearn_classifier.MambularClassifier"]], "mambularlss (class in mambular.models.sklearn_distributional)": [[2, "mambular.models.sklearn_distributional.MambularLSS"]], "mambularregressor (class in mambular.models.sklearn_regressor)": [[2, "mambular.models.sklearn_regressor.MambularRegressor"]], "config (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.config"]], "config (mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.config"]], "config 
(mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.config"]], "config (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.config"]], "config (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.config"]], "model (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.model"]], "model (mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.model"]], "model (mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.model"]], "model (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.model"]], "model (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.model"]], "preprocessor (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.preprocessor"]], "preprocessor (mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.preprocessor"]], "preprocessor (mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.preprocessor"]], "preprocessor (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.preprocessor"]], "preprocessor (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.preprocessor"]]}}) \ No newline at end of file diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html deleted file mode 100644 index b3350a0..0000000 --- a/docs/_build/html/_modules/index.html +++ /dev/null @@ -1,128 +0,0 @@ - - - - - - Overview: module code — mambular 26.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- - -
-
- - - - \ No newline at end of file diff --git a/docs/_build/html/_modules/mambular/base_models/classifier.html b/docs/_build/html/_modules/mambular/base_models/classifier.html deleted file mode 100644 index 3d270af..0000000 --- a/docs/_build/html/_modules/mambular/base_models/classifier.html +++ /dev/null @@ -1,507 +0,0 @@ - - - - - - mambular.base_models.classifier — mambular 26.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
- -
-
-
-
    -
  • - - -
  • -
  • -
-
-
-
-
- -

Source code for mambular.base_models.classifier

-import torch
-import torch.nn as nn
-import torchmetrics
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseMambularClassifier(pl.LightningModule): - """ - A base class for building classification models using the Mambular architecture within the PyTorch Lightning framework. - - This class integrates various components such as embeddings for categorical and numerical features, the Mambular model - for processing sequences of embeddings, and a classification head for prediction. It supports multi-class and binary classification tasks. - - Parameters - ---------- - num_classes : int - The number of classes in the classification task. For binary classification, this should be 2. - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the Mambular model. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. - This information is used to configure embedding layers for categorical features. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to the size of their input dimensions. - This information is used to configure embedding layers for numerical features. Defaults to None. - lr : float, optional - The learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) parameter for the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - - Attributes - ---------- - embedding_activation : nn.Module - The activation function to be applied after the linear transformation of numerical features. - num_embeddings : nn.ModuleList - A list of sequential modules, each corresponding to an embedding layer for a numerical feature. - cat_embeddings : nn.ModuleList - A list of embedding layers, each corresponding to a categorical feature. - mamba : Mamba - The Mambular model for processing sequences of embeddings. - norm_f : nn.Module - A normalization layer applied after the Mambular model. - tabular_head : nn.Linear - A linear layer for predicting the class labels from the aggregated embedding representation. - pooling_method : str - The method used to aggregate embeddings across features. Supported methods are 'avg', 'max', and 'sum'. - loss_fct : nn.Module - The loss function used for training the model, configured based on the number of classes. - acc : torchmetrics.Accuracy - A metric for computing the accuracy of predictions. - auroc : torchmetrics.AUROC - A metric for computing the Area Under the Receiver Operating Characteristic curve. - precision : torchmetrics.Precision - A metric for computing the precision of predictions. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model, processing both categorical and numerical features, aggregating embeddings, - and producing predictions. - training_step(batch, batch_idx) - Performs a single training step, computing the loss and logging metrics for the training set. - validation_step(batch, batch_idx) - Performs a single validation step, computing the loss and logging metrics for the validation set. - configure_optimizers() - Configures the model's optimizers and learning rate schedulers. 
- """ - - - - - - - def __init__( - self, - num_classes, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - ): - super().__init__() - - self.config = config - self.num_classes = 1 if num_classes == 2 else num_classes - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - nn.BatchNorm1d(self.config.d_model), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, self.num_classes)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.num_classes > 2: - self.loss_fct = nn.CrossEntropyLoss() - self.acc = torchmetrics.Accuracy( - task="multiclass", num_classes=self.num_classes - ) - self.auroc = torchmetrics.AUROC( - task="multiclass", num_classes=self.num_classes - ) - self.precision = torchmetrics.Precision( - task="multiclass", num_classes=self.num_classes - ) - else: - self.loss_fct = torch.nn.BCEWithLogitsLoss() - self.acc = torchmetrics.Accuracy(task="binary") - self.auroc = torchmetrics.AUROC(task="binary") - self.precision = torchmetrics.Precision(task="binary") - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the classifier. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The output predictions of the model. - """ - batch_size = ( - cat_features[0].size(0) - if cat_features is not None - else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("train_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "train_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log AUROC - auroc = self.auroc(preds, labels.int()) - self.log( - "train_auroc", - auroc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "train_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("val_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "val_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - auroc = self.auroc(preds, labels.int()) - self.log( - "val_auroc", auroc, on_step=False, on_epoch=True, prog_bar=True, logger=True - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "val_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - )
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
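The classifier above follows one tokenise-and-pool recipe: each tabular feature is mapped to a d_model-sized embedding, a learnable [CLS] token is prepended, the sequence passes through the Mamba block, and the result is pooled before the MLP head. The snippet below is a minimal, self-contained sketch of that embedding-and-pooling step with made-up feature metadata and dimensions; the Mamba block itself is omitted, so this illustrates the data flow only and is not the library's implementation.

```python
import torch
import torch.nn as nn

# Hypothetical feature metadata, standing in for cat_feature_info / num_feature_info.
cat_feature_info = {"colour": 5, "size": 3}    # feature -> number of categories
num_feature_info = {"age": 1, "income": 1}     # feature -> input dimension
d_model, batch_size = 16, 4

# One embedding module per feature, mirroring BaseMambularClassifier.__init__.
num_embeddings = nn.ModuleList(
    nn.Sequential(nn.Linear(dim, d_model, bias=False), nn.BatchNorm1d(d_model), nn.ReLU())
    for dim in num_feature_info.values()
)
cat_embeddings = nn.ModuleList(
    nn.Embedding(n_cat + 1, d_model) for n_cat in cat_feature_info.values()
)
cls_token = nn.Parameter(torch.zeros(1, 1, d_model))

# Inputs arrive as one tensor per feature, which is how forward() receives them.
cat_features = [torch.randint(0, n + 1, (batch_size, 1)) for n in cat_feature_info.values()]
num_features = [torch.randn(batch_size, dim) for dim in num_feature_info.values()]

cat_emb = torch.stack([emb(cat_features[i]) for i, emb in enumerate(cat_embeddings)], dim=1)
cat_emb = torch.squeeze(cat_emb, dim=2)        # (batch, n_cat_features, d_model)
num_emb = torch.stack([emb(num_features[i]) for i, emb in enumerate(num_embeddings)], dim=1)

x = torch.cat([cls_token.expand(batch_size, -1, -1), cat_emb, num_emb], dim=1)
# ... the Mamba block would transform x here ...
pooled = x.mean(dim=1)                         # pooling_method == "avg"
print(pooled.shape)                            # torch.Size([4, 16])
```

Swapping the pooling to "max", "sum", or the cls-token branch (`x[:, 0]`) corresponds to the other cases handled in `forward`.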
diff --git a/docs/_build/html/_modules/mambular/base_models/distributional.html b/docs/_build/html/_modules/mambular/base_models/distributional.html
deleted file mode 100644
index d592865..0000000
--- a/docs/_build/html/_modules/mambular/base_models/distributional.html
+++ /dev/null

Source code for mambular.base_models.distributional

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-from ..utils.distributions import (
-    NormalDistribution,
-    NegativeBinomialDistribution,
-    GammaDistribution,
-    StudentTDistribution,
-    PoissonDistribution,
-    InverseGammaDistribution,
-    BetaDistribution,
-    DirichletDistribution,
-    CategoricalDistribution,
-)
-
-
-
[docs]class BaseMambularLSS(pl.LightningModule): - """ - A base module for likelihood-based statistical learning (LSS) models built on PyTorch Lightning, - integrating the Mamba architecture for tabular data. This module is designed to accommodate various - statistical distribution families for different types of regression and classification tasks. - - Parameters - ---------- - family : str - The name of the statistical distribution family to be used for modeling. Supported families include - 'normal', 'poisson', 'gamma', 'beta', 'dirichlet', 'studentt', 'negativebinom', 'inversegamma', and 'categorical'. - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the model architecture. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None. - lr : float, optional - The initial learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) coefficient. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - **distribution_params : - Additional parameters specific to the chosen statistical distribution family. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the parameters of the chosen statistical distribution. - loss_fct : callable - The loss function derived from the chosen statistical distribution. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - - - - def __init__( - self, - family, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - **distribution_params, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - distribution_classes = { - "normal": NormalDistribution, - "poisson": PoissonDistribution, - "gamma": GammaDistribution, - "beta": BetaDistribution, - "dirichlet": DirichletDistribution, - "studentt": StudentTDistribution, - "negativebinom": NegativeBinomialDistribution, - "inversegamma": InverseGammaDistribution, - "categorical": CategoricalDistribution, - } - - if family in distribution_classes: - # Pass additional distribution_params to the constructor of the distribution class - self.family = distribution_classes[family](**distribution_params) - else: - raise ValueError("Unsupported family: {}".format(family)) - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - nn.BatchNorm1d(self.config.d_model), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to #distributional param output values - mlp_layers.append(nn.Linear(input_dim, self.family.param_count)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.loss_fct = lambda predictions, y_true: self.family.compute_loss( - predictions, y_true - ) - - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - self.pooling_method = self.config.pooling_method - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning predictions based on the configured statistical distribution. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The predictions of the model, typically the parameters of the chosen statistical distribution. - """ - - batch_size = ( - cat_features[0].size(0) - if cat_features is not None - else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss using the distribution-specific loss function, - and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds, labels) - self.log( - "train_loss", - loss, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss using the distribution-specific loss function, - and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - - - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds, labels) - self.log( - "val_loss", - loss, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
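`BaseMambularLSS` relies on only two things from the chosen family object: `param_count`, which sizes the final linear layer, and `compute_loss`, which turns head outputs plus targets into a training loss. The distribution classes imported from `..utils.distributions` are not shown in this excerpt, so the stand-in below is only a guess at that interface: a Normal-style family that reads a mean and an unconstrained scale from two head outputs and returns the negative log-likelihood.

```python
import torch
import torch.nn.functional as F

class NormalFamilySketch:
    """Illustrative stand-in for the family objects used by BaseMambularLSS.

    It mirrors only the two attributes the module relies on: `param_count`
    (width of the final linear layer) and `compute_loss` (the training loss).
    The real classes in mambular.utils.distributions may differ.
    """
    param_count = 2  # mean and (unconstrained) scale

    def compute_loss(self, predictions, y_true):
        mean = predictions[:, 0]
        scale = F.softplus(predictions[:, 1]) + 1e-6   # keep the std positive
        dist = torch.distributions.Normal(mean, scale)
        return -dist.log_prob(y_true).mean()            # negative log-likelihood

# Usage, mirroring how BaseMambularLSS wires the head and the loss:
family = NormalFamilySketch()
preds = torch.randn(8, family.param_count)   # output of tabular_head
y = torch.randn(8)
print(family.compute_loss(preds, y))
```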
diff --git a/docs/_build/html/_modules/mambular/base_models/embedding_classifier.html b/docs/_build/html/_modules/mambular/base_models/embedding_classifier.html
deleted file mode 100644
index 54c5e26..0000000
--- a/docs/_build/html/_modules/mambular/base_models/embedding_classifier.html
+++ /dev/null

Source code for mambular.base_models.embedding_classifier

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-import torchmetrics
-
-
-
[docs]class BaseEmbeddingMambularClassifier(pl.LightningModule): - """ - A specialized classification module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. - It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable - for complex protein sequence data. - - Parameters - ---------- - config : MambularConfig - Configuration parameters for the model architecture. - cat_feature_info : dict, optional - Information about categorical features, mapping feature names to the number of unique categories. - num_feature_info : dict, optional - Information about numerical features, mapping feature names to their number of dimensions after embedding. - lr : float, optional - Learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - Number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75. - seq_size : int, optional - Size of sequence chunks for processing numerical features. Relevant when `raw_embeddings` is False. - raw_embeddings : bool, optional - Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the target. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - def __init__( - self, - num_classes, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - seq_size: int = 20, - raw_embeddings=False, - ): - super().__init__() - - self.config = config - self.num_classes = 1 if num_classes == 2 else num_classes - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - self.seq_size = seq_size - self.raw_embeddings = raw_embeddings - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - if not self.raw_embeddings: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size // self.seq_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(self.seq_size, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - else: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(1, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, self.num_classes)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - - if self.num_classes > 2: - self.loss_fct = nn.CrossEntropyLoss() - self.acc = torchmetrics.Accuracy( - task="multiclass", num_classes=self.num_classes - ) - self.auroc = torchmetrics.AUROC( - task="multiclass", num_classes=self.num_classes - ) - self.precision = torchmetrics.Precision( - task="multiclass", num_classes=self.num_classes - ) - else: - self.loss_fct = torch.nn.BCEWithLogitsLoss() - self.acc = torchmetrics.Accuracy(task="binary") - self.auroc = torchmetrics.AUROC(task="binary") - self.precision = torchmetrics.Precision(task="binary") - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning regression predictions. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features or raw sequence data, depending on `raw_embeddings`. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - # Process categorical features if present - if not self.raw_embeddings: - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [] - # Iterate through the num_embeddings, taking slices of num_features for each - for i, emb in enumerate(self.num_embeddings): - # Calculate start and end indices for slicing the list - start_idx = i * self.seq_size - end_idx = start_idx + self.seq_size - - # Slice the num_features list to get the current chunk - current_chunk = num_features[start_idx:end_idx] - - # If the current_chunk is not empty, process it - if current_chunk: - # Concatenate tensors in the current chunk along dimension 1 - chunk_tensor = torch.cat(current_chunk, dim=1) - # Apply the embedding layer to the chunk_tensor - num_embeddings.append(emb(chunk_tensor)) - - # Stack the resulting embeddings along the second dimension if num_embeddings is not empty - if num_embeddings: - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - else: - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - 
raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss, and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("train_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "train_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log AUROC - auroc = self.auroc(preds, labels.int()) - self.log( - "train_auroc", - auroc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "train_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - if self.num_classes == 1: - labels = labels.unsqueeze( - 1 - ).float() # Reshape for binary classification loss calculation - - loss = self.loss_fct(preds, labels) - self.log("val_loss", loss) - # Calculate and log training accuracy - - acc = self.acc(preds, labels.int()) - self.log( - "val_acc", - acc, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - auroc = self.auroc(preds, labels.int()) - self.log( - "val_auroc", auroc, on_step=False, on_epoch=True, prog_bar=True, logger=True - ) - - # Calculate and log precision - precision = self.precision(preds, labels.int()) - self.log( - "val_precision", - precision, - on_step=False, - on_epoch=True, - prog_bar=True, - logger=True, - )
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
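When `raw_embeddings=False`, the numerical inputs are treated as one long sequence of scalars that is cut into chunks of `seq_size`, and each chunk gets its own `nn.Linear(seq_size, d_model)` embedding; with `raw_embeddings=True` every scalar is embedded on its own via `nn.Linear(1, d_model)`. Below is a small sketch of the chunked path with made-up sizes and a plain ReLU in place of the configured activation (note that, as printed above, this class references `self.embedding_activation` in `__init__` without assigning it first, unlike the other base models).

```python
import torch
import torch.nn as nn

# Assumed setup: 60 scalar numerical features arriving as a list of (batch, 1)
# tensors, chunked into sequences of seq_size = 20 as in the forward pass above.
batch_size, data_size, seq_size, d_model = 4, 60, 20, 32
num_features = [torch.randn(batch_size, 1) for _ in range(data_size)]

num_embedding_modules = data_size // seq_size          # -> 3 embedding layers
num_embeddings = nn.ModuleList(
    nn.Sequential(nn.Linear(seq_size, d_model, bias=False), nn.ReLU())
    for _ in range(num_embedding_modules)
)

chunks = []
for i, emb in enumerate(num_embeddings):
    start, end = i * seq_size, (i + 1) * seq_size
    chunk = torch.cat(num_features[start:end], dim=1)  # (batch, seq_size)
    chunks.append(emb(chunk))                          # (batch, d_model)

x = torch.stack(chunks, dim=1)                         # (batch, 3, d_model)
print(x.shape)
```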
diff --git a/docs/_build/html/_modules/mambular/base_models/embedding_regressor.html b/docs/_build/html/_modules/mambular/base_models/embedding_regressor.html
deleted file mode 100644
index d568dfc..0000000
--- a/docs/_build/html/_modules/mambular/base_models/embedding_regressor.html
+++ /dev/null

Source code for mambular.base_models.embedding_regressor

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseEmbeddingMambularRegressor(pl.LightningModule): - """ - A specialized regression module for protein data, built on PyTorch Lightning and integrating the Mamba architecture. - It supports embeddings for categorical features and can process raw or embedded numerical features, making it suitable - for complex protein sequence data. - - Parameters - ---------- - config : MambularConfig - Configuration parameters for the model architecture. - cat_feature_info : dict, optional - Information about categorical features, mapping feature names to the number of unique categories. Defaults to None. - num_feature_info : dict, optional - Information about numerical features, mapping feature names to their number of dimensions after embedding. Defaults to None. - lr : float, optional - Learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - Number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced by the scheduler. Defaults to 0.75. - seq_size : int, optional - Size of sequence chunks for processing numerical features. Relevant when `raw_embeddings` is False. - raw_embeddings : bool, optional - Indicates whether to use raw numerical features directly or to process them into embeddings. Defaults to False. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to the regression target. - loss_fct : nn.MSELoss - The loss function for regression tasks. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - def __init__( - self, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - seq_size: int = 20, - raw_embeddings=False, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - self.seq_size = seq_size - self.raw_embeddings = raw_embeddings - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - if not self.raw_embeddings: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size // self.seq_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(self.seq_size, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - else: - data_size = len(num_feature_info.items()) - num_embedding_modules = data_size - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(1, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for _ in range(num_embedding_modules) - ] - ) - - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, 1)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - - self.loss_fct = nn.MSELoss() - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the model, processing both categorical and numerical features, - and returning regression predictions. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features or raw sequence data, depending on `raw_embeddings`. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - # Process categorical features if present - if not self.raw_embeddings: - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - else: - cat_embeddings = None - - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [] - # Iterate through the num_embeddings, taking slices of num_features for each - for i, emb in enumerate(self.num_embeddings): - # Calculate start and end indices for slicing the list - start_idx = i * self.seq_size - end_idx = start_idx + self.seq_size - - # Slice the num_features list to get the current chunk - current_chunk = num_features[start_idx:end_idx] - - # If the current_chunk is not empty, process it - if current_chunk: - # Concatenate tensors in the current chunk along dimension 1 - chunk_tensor = torch.cat(current_chunk, dim=1) - # Apply the embedding layer to the chunk_tensor - num_embeddings.append(emb(chunk_tensor)) - - # Stack the resulting embeddings along the second dimension if num_embeddings is not empty - if num_embeddings: - num_embeddings = torch.stack(num_embeddings, dim=1) - else: - num_embeddings = None - - else: - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - 
raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Processes a single batch during training, computes the loss, and logs training metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - - Returns - ------- - Tensor - The computed loss for the batch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "train_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "val_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
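Both embedding-based models (and the regressor below) optionally apply a single shared `nn.LayerNorm(d_model)` to the stacked feature embeddings when `config.layer_norm_after_embedding` is set. A tiny sketch of what that step does to the token tensor, with arbitrary sizes:

```python
import torch
import torch.nn as nn

batch_size, n_tokens, d_model = 4, 6, 32
embeddings = torch.randn(batch_size, n_tokens, d_model)   # stacked feature embeddings

embedding_norm = nn.LayerNorm(d_model)   # shared by categorical and numerical tokens
normed = embedding_norm(embeddings)      # normalises each token over the d_model axis
print(normed.shape)                      # torch.Size([4, 6, 32])
```

Only the feature embeddings are normalised; the [CLS] token is concatenated afterwards without normalisation, as in the forward passes above.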
diff --git a/docs/_build/html/_modules/mambular/base_models/regressor.html b/docs/_build/html/_modules/mambular/base_models/regressor.html
deleted file mode 100644
index 319392f..0000000
--- a/docs/_build/html/_modules/mambular/base_models/regressor.html
+++ /dev/null

Source code for mambular.base_models.regressor

-import torch
-import torch.nn as nn
-from ..utils.mamba_arch import Mamba
-from ..utils.config import MambularConfig
-import pytorch_lightning as pl
-
-
-
[docs]class BaseMambularRegressor(pl.LightningModule): - """ - A base regression module for tabular data built on PyTorch Lightning. It incorporates embeddings - for categorical and numerical features with a configurable architecture provided by MambularConfig. - This module is designed for regression tasks. - - Parameters - ---------- - config : MambularConfig - An instance of MambularConfig containing configuration parameters for the model architecture. - cat_feature_info : dict, optional - A dictionary mapping the names of categorical features to their number of unique categories. Defaults to None. - num_feature_info : dict, optional - A dictionary mapping the names of numerical features to their number of dimensions after embedding. Defaults to None. - lr : float, optional - The initial learning rate for the optimizer. Defaults to 1e-03. - lr_patience : int, optional - The number of epochs with no improvement after which learning rate will be reduced. Defaults to 10. - weight_decay : float, optional - Weight decay (L2 penalty) coefficient. Defaults to 0.025. - lr_factor : float, optional - Factor by which the learning rate will be reduced. Defaults to 0.75. - - Attributes - ---------- - mamba : Mamba - The core neural network module implementing the Mamba architecture. - norm_f : nn.Module - Normalization layer applied after the Mamba block. - tabular_head : nn.Linear - Final linear layer mapping the features to a single output for regression tasks. - train_mse : torchmetrics.MeanSquaredError - Metric computation module for training Mean Squared Error. - val_mse : torchmetrics.MeanSquaredError - Metric computation module for validation Mean Squared Error. - loss_fct : torch.nn.MSELoss - The loss function for regression tasks. - - Methods - ------- - forward(cat_features, num_features) - Defines the forward pass of the model. - training_step(batch, batch_idx) - Processes a single batch during training. - validation_step(batch, batch_idx) - Processes a single batch during validation. - configure_optimizers() - Sets up the model's optimizer and learning rate scheduler. 
- """ - - - def __init__( - self, - config: MambularConfig, - cat_feature_info: dict = None, - num_feature_info: dict = None, - lr=1e-03, - lr_patience=10, - weight_decay=0.025, - lr_factor=0.75, - ): - super().__init__() - - self.config = config - self.lr = lr - self.lr_patience = lr_patience - self.weight_decay = weight_decay - self.lr_factor = lr_factor - self.cat_feature_info = cat_feature_info - self.num_feature_info = num_feature_info - - activations = { - "relu": nn.ReLU(), - "tanh": nn.Tanh(), - "sigmoid": nn.Sigmoid(), - "leaky_relu": nn.LeakyReLU(), - "elu": nn.ELU(), - "selu": nn.SELU(), - "gelu": nn.GELU(), - "softplus": nn.Softplus(), - "leakyrelu": nn.LeakyReLU(), - "linear": nn.Identity(), - } - - self.embedding_activation = activations.get( - self.config.num_embedding_activation.lower() - ) - if self.embedding_activation is None: - raise ValueError( - f"Unsupported activation function: {self.config.num_embedding_activation}" - ) - - self.num_embeddings = nn.ModuleList( - [ - nn.Sequential( - nn.Linear(input_shape, self.config.d_model, bias=False), - self.embedding_activation, # Example using ReLU as the activation function, change as needed - ) - for feature_name, input_shape in num_feature_info.items() - ] - ) - - # Create embedding layers for categorical features based on cat_feature_info - self.cat_embeddings = nn.ModuleList( - [ - nn.Embedding(num_categories + 1, self.config.d_model) - for feature_name, num_categories in cat_feature_info.items() - ] - ) - - self.mamba = Mamba(self.config) - self.norm_f = self.config.norm(self.config.d_model) - mlp_activation_fn = activations.get( - self.config.tabular_head_activation.lower(), nn.Identity() - ) - - # Dynamically create MLP layers based on config.tabular_units - mlp_layers = [] - input_dim = self.config.d_model # Initial input dimension - - # Iterate over the specified units for each layer in the MLP - for units in self.config.tabular_head_units: - mlp_layers.append(nn.Linear(input_dim, units)) - mlp_layers.append(mlp_activation_fn) - mlp_layers.append(nn.Dropout(self.config.tabular_head_dropout)) - input_dim = units - - # Add the final linear layer to map to a single output value - mlp_layers.append(nn.Linear(input_dim, 1)) - - # Combine all layers into a Sequential module - self.tabular_head = nn.Sequential(*mlp_layers) - - self.pooling_method = self.config.pooling_method - self.cls_token = nn.Parameter(torch.zeros(1, 1, self.config.d_model)) - - self.loss_fct = nn.MSELoss() - - if self.config.layer_norm_after_embedding: - self.embedding_norm = nn.LayerNorm(self.config.d_model) - -
[docs] def forward(self, cat_features, num_features): - """ - Defines the forward pass of the regressor. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - - batch_size = ( - cat_features[0].size(0) if cat_features != [] else num_features[0].size(0) - ) - cls_tokens = self.cls_token.expand(batch_size, -1, -1) - - # Process categorical features if present - if len(self.cat_embeddings) > 0 and cat_features: - cat_embeddings = [ - emb(cat_features[i]) for i, emb in enumerate(self.cat_embeddings) - ] - cat_embeddings = torch.stack(cat_embeddings, dim=1) - cat_embeddings = torch.squeeze(cat_embeddings, dim=2) - if self.config.layer_norm_after_embedding: - cat_embeddings = self.embedding_norm(cat_embeddings) - else: - cat_embeddings = None - - # Process numerical features if present - if len(self.num_embeddings) > 0 and num_features: - num_embeddings = [ - emb(num_features[i]) for i, emb in enumerate(self.num_embeddings) - ] - num_embeddings = torch.stack(num_embeddings, dim=1) - if self.config.layer_norm_after_embedding: - num_embeddings = self.embedding_norm(num_embeddings) - else: - num_embeddings = None - - # Combine embeddings if both types are present, otherwise use whichever is available - - if cat_embeddings is not None and num_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings, num_embeddings], dim=1) - elif cat_embeddings is not None: - x = torch.cat([cls_tokens, cat_embeddings], dim=1) - elif num_embeddings is not None: - x = torch.cat([cls_tokens, num_embeddings], dim=1) - else: - raise ValueError("No features provided to the model.") - - x = self.mamba(x) - - # Apply pooling based on the specified method - if self.pooling_method == "avg": - x = torch.mean(x, dim=1) - elif self.pooling_method == "max": - x, _ = torch.max(x, dim=1) - elif self.pooling_method == "sum": - x = torch.sum(x, dim=1) - elif self.pooling_method == "cls_token": - x = x[:, 0] - else: - raise ValueError(f"Invalid pooling method: {self.pooling_method}") - - x = self.norm_f(x) - preds = self.tabular_head(x) - - return preds
- -
[docs] def training_step(self, batch, batch_idx): - """ - Defines the forward pass of the regressor. - - Parameters - ---------- - cat_features : Tensor - Tensor containing the categorical features. - num_features : Tensor - Tensor containing the numerical features. - - Returns - ------- - Tensor - The output predictions of the model for regression tasks. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "train_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - return loss
- -
[docs] def validation_step(self, batch, batch_idx): - """ - Processes a single batch during validation, computes the loss, and logs validation metrics. - - Parameters - ---------- - batch : tuple - A batch of data from the DataLoader, containing numerical features, categorical features, and labels. - batch_idx : int - The index of the batch within the epoch. - """ - num_features, cat_features, labels = batch - preds = self(num_features, cat_features) - - loss = self.loss_fct(preds.squeeze(), labels.float()) - self.log( - "val_loss", - loss, - on_step=True, - on_epoch=True, - prog_bar=True, - logger=True, - ) - - return loss
- -
[docs] def configure_optimizers(self): - """ - Sets up the model's optimizer and learning rate scheduler based on the configurations provided. - - Returns - ------- - dict - A dictionary containing the optimizer and lr_scheduler configurations. - """ - optimizer = torch.optim.Adam( - self.parameters(), lr=self.lr, weight_decay=self.config.weight_decay - ) - scheduler = { - "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=self.lr_factor, - patience=self.lr_patience, - verbose=True, - ), - "monitor": "val_loss", # Name of the metric to monitor - "interval": "epoch", - "frequency": 1, - } - - return {"optimizer": optimizer, "lr_scheduler": scheduler}
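All of the base models share the same `configure_optimizers` wiring: Adam plus a `ReduceLROnPlateau` scheduler that watches `val_loss`. One detail worth noting in the code above is that the optimizer reads `self.config.weight_decay` even though a separate `weight_decay` constructor argument is also stored on `self.weight_decay`, which may or may not be intentional. The sketch below shows the same wiring in a throwaway LightningModule that uses a single set of hyperparameters.

```python
import torch
import torch.nn as nn
import pytorch_lightning as pl

class TinyRegressor(pl.LightningModule):
    """Minimal module reusing the optimizer/scheduler wiring shown above."""

    def __init__(self, lr=1e-3, lr_factor=0.75, lr_patience=10, weight_decay=0.025):
        super().__init__()
        self.save_hyperparameters()
        self.net = nn.Linear(10, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return nn.functional.mse_loss(self.net(x).squeeze(), y)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        loss = nn.functional.mse_loss(self.net(x).squeeze(), y)
        self.log("val_loss", loss)   # the metric the scheduler monitors
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(
            self.parameters(),
            lr=self.hparams.lr,
            weight_decay=self.hparams.weight_decay,
        )
        scheduler = {
            "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(
                optimizer,
                mode="min",
                factor=self.hparams.lr_factor,
                patience=self.hparams.lr_patience,
            ),
            "monitor": "val_loss",
            "interval": "epoch",
            "frequency": 1,
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler}
```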
diff --git a/docs/_build/html/_modules/mambular/models/sklearn_classifier.html b/docs/_build/html/_modules/mambular/models/sklearn_classifier.html
deleted file mode 100644
index 68313be..0000000
--- a/docs/_build/html/_modules/mambular/models/sklearn_classifier.html
+++ /dev/null

Source code for mambular.models.sklearn_classifier

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-import numpy as np
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.classifier import BaseMambularClassifier
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.metrics import accuracy_score
-
-
-
[docs]class MambularClassifier(BaseEstimator): - """ - A classifier that mimics scikit-learn's API using PyTorch Lightning and a custom architecture. - - This classifier is designed to work with tabular data and provides a flexible interface for specifying model - configurations and preprocessing steps. It integrates smoothly with scikit-learn's utilities, such as cross-validation - and grid search. - - Parameters - ---------- - **kwargs : Various - Accepts any number of keyword arguments that are passed to the MambularConfig and Preprocessor classes. - Known configuration arguments for the model are extracted based on a predefined list, and the rest are - passed to the Preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object that holds model-specific settings. - preprocessor : Preprocessor - Preprocessor object for handling feature preprocessing like normalization and encoding. - model : BaseMambularClassifier or None - The underlying PyTorch Lightning model, instantiated upon calling the `fit` method. - """ - - def __init__(self, **kwargs): - # Known config arguments - print("Received kwargs:", kwargs) - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - "layer_norm_after_embedding", - ] - self.config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**self.config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. - - Parameters - ---------- - deep : bool, default=True - If True, will return the parameters for this estimator and contained subobjects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. - - Parameters - ---------- - **parameters : dict - Estimator parameters. - - Returns - ------- - self : object - Estimator instance. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - The input samples. - y : array-like of shape (n_samples,) - The target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data and create corresponding DataLoaders. - - Parameters - ---------- - X_train : array-like of shape (n_samples, n_features) - The training input samples. - y_train : array-like of shape (n_samples,) - The training target values. - X_val : array-like of shape (n_samples, n_features) - The validation input samples. - y_val : array-like of shape (n_samples,) - The validation target values. - batch_size : int - Size of mini-batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data before splitting into batches. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing training and validation DataLoaders. 
- """ - - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.long) - val_labels = torch.tensor(y_val, dtype=torch.long) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels, regression=False - ) - val_dataset = MambularDataset( - val_cat_tensors, val_num_tensors, val_labels, regression=False - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - **trainer_kwargs - ): - """ - Fit the model to the given training data, optionally using a separate validation set. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The training input samples. - y : array-like of shape (n_samples,) or (n_samples, n_outputs) - The target values (class labels in classification, real numbers in regression). - val_size : float, default=0.2 - The proportion of the dataset to include in the validation split if `X_val` is None. Ignored if `X_val` is provided. - X_val : array-like or pd.DataFrame of shape (n_samples, n_features), optional - The validation input samples. If provided, `X` and `y` are not split and this data is used for validation. - y_val : array-like of shape (n_samples,) or (n_samples, n_outputs), optional - The validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Seed used by the random number generator for shuffling the data if `X_val` is not provided. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch if `X_val` is not provided. - patience : int, default=10 - Number of epochs with no improvement after which training will be stopped if using early stopping. - monitor : str, default="val_loss" - Quantity to be monitored for early stopping. - mode : str, default="min" - One of {"min", "max"}. In "min" mode, training will stop when the quantity monitored has stopped decreasing; in "max" mode, it will stop when the quantity monitored has stopped increasing. - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement after which the learning rate will be reduced. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. new_lr = lr * factor. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) parameter. - **trainer_kwargs : dict - Additional keyword arguments to be passed to the PyTorch Lightning Trainer constructor. - - Returns - ------- - self : object - The fitted estimator. 
- """ - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - num_classes = len(np.unique(y)) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularClassifier( - num_classes=num_classes, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predict the class labels for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - - Returns - ------- - predictions : ndarray of shape (n_samples,) - Predicted class labels for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class labels. - The predictions are converted from a PyTorch tensor to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - predictions = torch.argmax(logits, dim=1) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def predict_proba(self, X): - """ - Predict class probabilities for the given input samples. 
- - Example - ------- - from sklearn.metrics import accuracy_score, precision_score, f1_score, roc_auc_score - - # Define the metrics you want to evaluate - metrics = { - 'Accuracy': (accuracy_score, False), - 'Precision': (precision_score, False), - 'F1 Score': (f1_score, False), - 'AUC Score': (roc_auc_score, True) - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = classifier.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples for which to predict class probabilities. - - Returns - ------- - probabilities : ndarray of shape (n_samples, n_classes) - Predicted class probabilities for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class probabilities. - Softmax is applied to the logits to obtain probabilities, which are then converted from a PyTorch tensor - to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - probabilities = torch.softmax(logits, dim=1) - - # Convert probabilities to NumPy array and return - return probabilities.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) - The true class labels against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are tuples containing the metric function - and a boolean indicating whether the metric requires probability scores (True) or class labels (False). - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses either the `predict` or `predict_proba` method depending on the metric requirements. - """ - # Ensure input is in the correct format - if metrics is None: - metrics = {"Accuracy": (accuracy_score, False)} - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Initialize dictionary to store results - scores = {} - - # Generate class probabilities if any metric requires them - if any(use_proba for _, use_proba in metrics.values()): - probabilities = self.predict_proba(X) - - # Generate class labels if any metric requires them - if any(not use_proba for _, use_proba in metrics.values()): - predictions = self.predict(X) - - # Compute each metric - for metric_name, (metric_func, use_proba) in metrics.items(): - if use_proba: - scores[metric_name] = metric_func(y_true, probabilities) - else: - scores[metric_name] = metric_func(y_true, predictions) - - return scores
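To make the scikit-learn-style workflow above concrete, here is a minimal usage sketch for MambularClassifier. It relies only on the methods documented in this module (fit, predict, predict_proba, evaluate); the import path, the synthetic data, and the hyperparameter values are illustrative assumptions, not part of the source.

import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score
from mambular.models.sklearn_classifier import MambularClassifier  # assumed import path

rng = np.random.default_rng(0)
X = pd.DataFrame({
    "age": rng.normal(40, 10, 500),
    "income": rng.normal(50_000, 10_000, 500),
})
y = (X["income"] > 50_000).astype(int).to_numpy()

# Recognized kwargs (d_model, n_layers, dropout, ...) go to MambularConfig;
# anything else is forwarded to the Preprocessor.
clf = MambularClassifier(d_model=64, n_layers=4, dropout=0.1)
clf.fit(X, y, max_epochs=10, batch_size=64, patience=5)

preds = clf.predict(X)        # class labels
proba = clf.predict_proba(X)  # softmax probabilities

# evaluate() expects {name: (metric_fn, needs_probabilities)}
scores = clf.evaluate(X, y, metrics={
    "Accuracy": (accuracy_score, False),
    "F1 Score": (f1_score, False),
})
print(scores)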
\ No newline at end of file
diff --git a/docs/_build/html/_modules/mambular/models/sklearn_distributional.html b/docs/_build/html/_modules/mambular/models/sklearn_distributional.html
deleted file mode 100644
index 55d6e19..0000000
--- a/docs/_build/html/_modules/mambular/models/sklearn_distributional.html
+++ /dev/null
@@ -1,675 +0,0 @@
Source code for mambular.models.sklearn_distributional

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.distributional import BaseMambularLSS
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from ..utils.distributional_metrics import (
-    poisson_deviance,
-    gamma_deviance,
-    beta_brier_score,
-    dirichlet_error,
-    student_t_loss,
-    negative_binomial_deviance,
-    inverse_gamma_loss,
-)
-from sklearn.metrics import mean_squared_error, accuracy_score
-import numpy as np
-import properscoring as ps
-
-
-
[docs]class MambularLSS(BaseEstimator): - """ - MambularLSS is a machine learning estimator that is designed for structured data, - incorporating both preprocessing and a deep learning model. The estimator - integrates configurable components for data preprocessing and the neural network model, - facilitating end-to-end training and prediction workflows. - - The initialization of this class separates configuration arguments for the model and - the preprocessor, allowing for flexible adjustment of parameters. - - Attributes - ---------- - config : MambularConfig - Configuration object containing model-specific parameters. - preprocessor : Preprocessor - Preprocessor object for data preprocessing steps. - model : torch.nn.Module - The neural network model, initialized based on 'config'. - - Parameters - ---------- - **kwargs : Arbitrary keyword arguments, divided into configuration for the model and - preprocessing. Recognized keys include model parameters such as 'd_model', - 'n_layers', etc., and any additional keys are assumed to be preprocessor arguments. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator, optionally including parameters from nested components - like the preprocessor. - - Parameters - ---------- - deep : bool, default=True - If True, return parameters of nested components. - - Returns - ------- - dict - A dictionary mapping parameter names to their values. For nested components, - parameter names are prefixed accordingly (e.g., 'preprocessor__<param_name>'). - """ - - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator, allowing for modifications to both the configuration - and preprocessor parameters. Parameters not recognized as configuration arguments are - assumed to be preprocessor arguments. - - Parameters - ---------- - **parameters: Arbitrary keyword arguments where keys are parameter names and values - are the new parameter values. - - Returns - ------- - self: This instance with updated parameters. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like - Features of the dataset. - y : array-like - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int, optional - The seed used by the random number generator for reproducibility. - - Returns - ------- - tuple - A tuple containing split datasets (X_train, X_val, y_train, y_val). - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data, fit the preprocessor on the training data, - and transform both training and validation data. This method also initializes tensors - for categorical and numerical features and labels, and prepares DataLoader objects for - both datasets. - - Parameters - ---------- - X_train : array-like - Training features. - y_train : array-like - Training target values. - X_val : array-like - Validation features. - y_val : array-like - Validation target values. - batch_size : int - Batch size for DataLoader objects. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - MambularDataModule - An object containing DataLoaders for training and validation datasets. 
- """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = MambularDataset(val_cat_tensors, val_num_tensors, val_labels) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocess test data using the fitted preprocessor. This method prepares tensors for - categorical and numerical features based on the preprocessed test data. - - Parameters - ---------- - X : array-like - Test features to preprocess. - - Returns - ------- - tuple - A tuple containing lists of tensors for categorical and numerical features. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - family, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - **trainer_kwargs, - ): - """ - Fits the model to the provided data, using the specified loss distribution family for the prediction task. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Training features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values for training. - family : str - The name of the distribution family to use for the loss function. Examples include 'normal' for regression tasks. - val_size : float, default=0.2 - Proportion of the dataset to include in the validation split if `X_val` is None. - X_val : DataFrame or array-like, shape (n_samples, n_features), optional - Validation features. If provided, `X` and `y` are not split. - y_val : array-like, shape (n_samples,) or (n_samples, n_targets), optional - Validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Seed used by the random number generator for shuffling the data. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch. - patience : int, default=10 - Number of epochs with no improvement on the validation metric to wait before early stopping. - monitor : str, default="val_loss" - The metric to monitor for early stopping. - mode : str, default="min" - In 'min' mode, training will stop when the quantity monitored has stopped decreasing; - in 'max' mode, it will stop when the quantity monitored has stopped increasing. - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement on the validation metric to wait before reducing the learning rate. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) parameter. - **trainer_kwargs : dict - Additional keyword arguments for PyTorch Lightning's Trainer class. - - Returns - ------- - self : object - The fitted estimator. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularLSS( - family=family, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", - mode="min", - save_top_k=1, - dirpath="model_checkpoints", - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs, - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples using the fitted model. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_distributional_parameters) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None, distribution_family=None): - """ - Evaluate the model on the given data using specified metrics tailored to the distribution type. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Input samples. - y_true : DataFrame or array-like, shape (n_samples,) or (n_samples, n_outputs) - True target values. - metrics : dict, optional - A dictionary where keys are metric names and values are the metric functions. - If None, default metrics based on the detected or specified distribution_family are used. - distribution_family : str, optional - Specifies the distribution family the model is predicting for. If None, it will attempt to infer based - on the model's settings. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. 
- """ - # Infer distribution family from model settings if not provided - if distribution_family is None: - distribution_family = getattr(self.model, "distribution_family", "normal") - - # Setup default metrics if none are provided - if metrics is None: - metrics = self.get_default_metrics(distribution_family) - - # Make predictions - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores - - def get_default_metrics(self, distribution_family): - """ - Provides default metrics based on the distribution family. - - Parameters - ---------- - distribution_family : str - The distribution family for which to provide default metrics. - - Returns - ------- - metrics : dict - A dictionary of default metric functions. - """ - default_metrics = { - "normal": { - "MSE": lambda y, pred: mean_squared_error(y, pred[:, 0]), - "CRPS": lambda y, pred: np.mean( - [ - ps.crps_gaussian(y[i], mu=pred[i, 0], sig=np.sqrt(pred[i, 1])) - for i in range(len(y)) - ] - ), - }, - "poisson": {"Poisson Deviance": poisson_deviance}, - "gamma": {"Gamma Deviance": gamma_deviance}, - "beta": {"Brier Score": beta_brier_score}, - "dirichlet": {"Dirichlet Error": dirichlet_error}, - "studentt": {"Student-T Loss": student_t_loss}, - "negativebinom": {"Negative Binomial Deviance": negative_binomial_deviance}, - "inversegamma": {"Inverse Gamma Loss": inverse_gamma_loss}, - "categorical": {"Accuracy": accuracy_score}, - } - return default_metrics.get(distribution_family, {})
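As a companion to the distributional estimator above, a minimal sketch of how MambularLSS might be used for a Gaussian (location-scale) regression. The import path, data, and hyperparameters are assumptions; the `family` argument and the default MSE/CRPS metrics for the 'normal' family follow the code above.

import numpy as np
import pandas as pd
from mambular.models.sklearn_distributional import MambularLSS  # assumed import path

rng = np.random.default_rng(0)
X = pd.DataFrame({"x1": rng.normal(size=300), "x2": rng.uniform(size=300)})
y = 2.0 * X["x1"].to_numpy() + rng.normal(scale=0.5, size=300)

lss = MambularLSS(d_model=64, n_layers=4)
lss.fit(X, y, family="normal", max_epochs=10, batch_size=64)

# predict() returns the distributional parameters, e.g. mean and variance for 'normal'
params = lss.predict(X)

# With no metrics given, evaluate() falls back to the family defaults (MSE and CRPS here)
scores = lss.evaluate(X, y, distribution_family="normal")
print(scores)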
\ No newline at end of file
diff --git a/docs/_build/html/_modules/mambular/models/sklearn_embedding_classifier.html b/docs/_build/html/_modules/mambular/models/sklearn_embedding_classifier.html
deleted file mode 100644
index a9bb616..0000000
--- a/docs/_build/html/_modules/mambular/models/sklearn_embedding_classifier.html
+++ /dev/null
@@ -1,720 +0,0 @@
Source code for mambular.models.sklearn_embedding_classifier

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.embedding_classifier import BaseEmbeddingMambularClassifier
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, EmbeddingMambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.decomposition import PCA
-import numpy as np
-from sklearn.metrics import accuracy_score
-
-
-
[docs]class EmbeddingMambularClassifier(BaseEstimator): - """ - Provides an scikit-learn-like interface for the ProteinMambularClassifier, making it compatible with - scikit-learn's utilities and workflow. This class encapsulates the PyTorch Lightning model, preprocessing, - and data loading, offering methods for fitting, predicting, and probability estimation in a manner akin - to scikit-learn's API. - - Parameters - ---------- - **kwargs : Configuration parameters that can include both MambularConfig settings and preprocessing - options. Any unrecognized parameters are passed to the preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object for the model, storing architecture-specific parameters. - preprocessor : Preprocessor - Object handling data preprocessing steps such as feature encoding and normalization. - model : ProteinMambularClassifier - The underlying neural network model, instantiated during the `fit` method. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. - - Parameters - ---------- - deep : bool, default=True - If True, will return the parameters for this estimator and contained subobjects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. - - Parameters - ---------- - **parameters : dict - Estimator parameters. - - Returns - ------- - self : object - Estimator instance. - """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Split the dataset into training and validation sets. - - Parameters - ---------- - X : array-like of shape (n_samples, n_features) - The input samples. 
- y : array-like of shape (n_samples,) - The target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocess the training and validation data and create corresponding DataLoaders. - - Parameters - ---------- - X_train : array-like of shape (n_samples, n_features) - The training input samples. - y_train : array-like of shape (n_samples,) - The training target values. - X_val : array-like of shape (n_samples, n_features) - The validation input samples. - y_val : array-like of shape (n_samples,) - The validation target values. - batch_size : int - Size of mini-batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data before splitting into batches. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing training and validation DataLoaders. - """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.long) - val_labels = torch.tensor(y_val, dtype=torch.long) - - # Create datasets - train_dataset = EmbeddingMambularDataset( - train_cat_tensors, train_num_tensors, train_labels, regression=False - ) - val_dataset = EmbeddingMambularDataset( - val_cat_tensors, val_num_tensors, val_labels, regression=False - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, 
batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. - """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + str( - key - ) # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + str(key) # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - raw_embeddings=False, - seq_size=20, - pca=False, - reduced_dims=16, - **trainer_kwargs - ): - """ - Fits the model to the given dataset. - - Parameters - ---------- - X : pandas DataFrame or array-like - Feature matrix for training. - y : array-like - Target vector. - val_size : float, optional - Fraction of the data to use for validation if X_val is None. - X_val : pandas DataFrame or array-like, optional - Feature matrix for validation. - y_val : array-like, optional - Target vector for validation. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, optional - Seed for random number generators. - batch_size : int, default=32 - Size of batches for training and validation. - shuffle : bool, default=True - Whether to shuffle training data before each epoch. - patience : int, default=10 - Patience for early stopping based on val_loss. - monitor : str, default='val_loss' - Metric to monitor for early stopping. - mode : str, default='min' - Mode for early stopping ('min' or 'max'). - lr : float, default=0.001 - Learning rate for the optimizer. - lr_patience : int, default=5 - Patience for learning rate reduction. - factor : float, default=0.1 - Factor for learning rate reduction. - weight_decay : float, default=0.0 - Weight decay for the optimizer. - raw_embeddings : bool, default=False - Whether to use raw features or embeddings. - seq_size : int, optional - Sequence size for embeddings, relevant if raw_embeddings is False. - **trainer_kwargs : dict - Additional arguments for the PyTorch Lightning Trainer. - - Returns - ------- - self : object - The fitted estimator. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - # Apply PCA if indicated - if pca: - pca_transformer = PCA(n_components=reduced_dims) - X = pca_transformer.fit_transform( - X - ) # Fit and transform the PCA on the complete dataset - if X_val is not None: - X_val = pca_transformer.transform( - X_val - ) # Transform validation data with the same PCA model - - raw_embeddings = True - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - else: - X_train = X - y_train = y - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - if raw_embeddings: - self.config.d_model = X.shape[1] - - num_classes = len(np.unique(y)) - - self.model = BaseEmbeddingMambularClassifier( - num_classes=num_classes, - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - raw_embeddings=raw_embeddings, - seq_size=seq_size, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predict the class labels for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - - Returns - ------- - predictions : ndarray of shape (n_samples,) - Predicted class labels for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class labels. - The predictions are converted from a PyTorch tensor to a NumPy array before being returned. 
- """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - if hasattr(self, "pca_transformer"): - X = pd.DataFrame(self.pca_transformer.transform(X)) - - cat_tensors, num_tensors = self.preprocess_test_data(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - predictions = torch.argmax(logits, dim=1) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def predict_proba(self, X): - """ - Predict class probabilities for the given input samples. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples for which to predict class probabilities. - - Returns - ------- - probabilities : ndarray of shape (n_samples, n_classes) - Predicted class probabilities for each input sample. - - Notes - ----- - The method preprocesses the input data using the same preprocessor used during training, - sets the model to evaluation mode, and then performs inference to predict the class probabilities. - Softmax is applied to the logits to obtain probabilities, which are then converted from a PyTorch tensor - to a NumPy array before being returned. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - cat_tensors, num_tensors = self.preprocess_test_data(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - logits = self.model(cat_tensors, num_tensors) - probabilities = torch.softmax(logits, dim=1) - - # Convert probabilities to NumPy array and return - return probabilities.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) - The true class labels against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are tuples containing the metric function - and a boolean indicating whether the metric requires probability scores (True) or class labels (False). - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses either the `predict` or `predict_proba` method depending on the metric requirements. 
- """ - # Ensure input is in the correct format - if metrics is None: - metrics = {"Accuracy": (accuracy_score, False)} - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Initialize dictionary to store results - scores = {} - - # Generate class probabilities if any metric requires them - if any(use_proba for _, use_proba in metrics.values()): - probabilities = self.predict_proba(X) - - # Generate class labels if any metric requires them - if any(not use_proba for _, use_proba in metrics.values()): - predictions = self.predict(X) - - # Compute each metric - for metric_name, (metric_func, use_proba) in metrics.items(): - if use_proba: - scores[metric_name] = metric_func(y_true, probabilities) - else: - scores[metric_name] = metric_func(y_true, predictions) - - return scores
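A usage sketch for EmbeddingMambularClassifier along the same lines, aimed at wide numerical inputs such as precomputed sequence embeddings. The import path, data shape, and seq_size value are illustrative assumptions; the pca/reduced_dims options shown in fit() above could be used instead of preparing the feature matrix yourself.

import numpy as np
import pandas as pd
from mambular.models.sklearn_embedding_classifier import EmbeddingMambularClassifier  # assumed path

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(400, 60)))  # e.g. flattened per-sample embeddings
y = rng.integers(0, 2, size=400)

clf = EmbeddingMambularClassifier(n_layers=4)
# seq_size controls how the numerical features are grouped into a sequence of embeddings
clf.fit(X, y, max_epochs=10, batch_size=64, seq_size=20)

proba = clf.predict_proba(X)
scores = clf.evaluate(X, y)  # defaults to accuracy when no metrics dict is supplied
print(scores)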
\ No newline at end of file
diff --git a/docs/_build/html/_modules/mambular/models/sklearn_embedding_regressor.html b/docs/_build/html/_modules/mambular/models/sklearn_embedding_regressor.html
deleted file mode 100644
index 44798ba..0000000
--- a/docs/_build/html/_modules/mambular/models/sklearn_embedding_regressor.html
+++ /dev/null
@@ -1,664 +0,0 @@
Source code for mambular.models.sklearn_embedding_regressor

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.embedding_regressor import BaseEmbeddingMambularRegressor
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, EmbeddingMambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.decomposition import PCA
-from sklearn.metrics import mean_squared_error
-
-
-
[docs]class EmbeddingMambularRegressor(BaseEstimator): - """ - An sklearn-like interface for the ProteinMambularRegressor, making it compatible with sklearn's utilities - and workflows. This class wraps the PyTorch Lightning model and preprocessor, providing methods for fitting, - predicting, and setting/getting parameters in a way that mimics sklearn's API. - - Parameters - ---------- - **kwargs : Keyword arguments that can include both configuration parameters for the MambularConfig and - parameters for the preprocessor. - - Attributes - ---------- - config : MambularConfig - Configuration object containing model-specific parameters. - preprocessor : Preprocessor - Preprocessor object for data preprocessing steps. - model : ProteinMambularRegressor - The neural network model, initialized after the `fit` method is called. - """ - - def __init__(self, **kwargs): - # Known config arguments - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - ] - config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - - if not "numerical_preprocessing" in preprocessor_kwargs.keys(): - preprocessor_kwargs["numerical_preprocessing"] = "normalization" - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - deep : bool, default=True - If True, returns the parameters for this estimator and contained sub-objects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - **parameters : dict - Estimator parameters to be set. - - Returns - ------- - self : object - The instance with updated parameters. - """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Splits the dataset into training and validation sets. 
- - Parameters - ---------- - X : array-like or DataFrame, shape (n_samples, n_features) - Input features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocesses the training and validation data, and creates DataLoaders for them. - - Parameters - ---------- - X_train : DataFrame or array-like, shape (n_samples_train, n_features) - Training feature set. - y_train : array-like, shape (n_samples_train,) - Training target values. - X_val : DataFrame or array-like, shape (n_samples_val, n_features) - Validation feature set. - y_val : array-like, shape (n_samples_val,) - Validation target values. - batch_size : int - Size of batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing the training and validation DataLoaders. - """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = EmbeddingMambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = EmbeddingMambularDataset( - val_cat_tensors, val_num_tensors, val_labels - ) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, 
batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. - """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + str( - key - ) # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + str(key) # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + str( - key - ) # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=64, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=10, - factor=0.75, - weight_decay=0.025, - raw_embeddings=False, - seq_size=20, - pca=False, - **trainer_kwargs - ): - """ - Fits the ProteinMambularRegressor model to the training data. - - Parameters - ---------- - X : array-like or DataFrame - The training input samples. - y : array-like - The target values (class labels for classification, real numbers for regression). - val_size : float, optional - The proportion of the dataset to include in the validation split if `X_val` is not provided. - X_val : array-like or DataFrame, optional - The validation input samples. - y_val : array-like, optional - The validation target values. - max_epochs : int, optional - The maximum number of epochs for training. - random_state : int, optional - The seed used by the random number generator. - batch_size : int, optional - Size of the batches for training. - shuffle : bool, optional - Whether to shuffle the training data. - patience : int, optional - Patience for early stopping. - monitor : str, optional - Quantity to be monitored for early stopping. - mode : str, optional - One of {'auto', 'min', 'max'}. In 'min' mode, training will stop when the quantity monitored has stopped decreasing; - in 'max' mode, it will stop when the quantity monitored has stopped increasing. - lr : float, optional - Learning rate for the optimizer. - lr_patience : int, optional - Number of epochs with no improvement after which the learning rate will be reduced. - factor : float, optional - Factor by which the learning rate will be reduced. - weight_decay : float, optional - Weight decay coefficient for regularization in the optimizer. - raw_embeddings : bool, optional - Whether to use raw numerical features directly or to process them into embeddings. 
- seq_size : int, optional - The sequence size for processing numerical features when not using raw embeddings. - **trainer_kwargs : dict - Additional keyword arguments for the PyTorch Lightning Trainer. - - Returns - ------- - self : object - Returns an instance of self. - """ - - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - # Apply PCA if indicated - if pca: - self.pca_transformer = PCA(n_components=seq_size) - X = pd.DataFrame( - self.pca_transformer.fit_transform(X) - ) # Fit and transform the PCA on the complete dataset - if X_val is not None: - X_val = pd.DataFrame( - self.pca_transformer.transform(X_val) - ) # Transform validation data with the same PCA model - - raw_embeddings = True - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - else: - X_train = X - y_train = y - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - if raw_embeddings: - self.config.d_model = X.shape[1] - - self.model = BaseEmbeddingMambularRegressor( - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - raw_embeddings=raw_embeddings, - seq_size=seq_size, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_outputs) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - if hasattr(self, "pca_transformer"): - X = pd.DataFrame(self.pca_transformer.transform(X)) - - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. 
- - Example: - metrics = { - 'Mean Squared Error': mean_squared_error, - 'R2 Score': r2_score - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = regressor.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) - The true target values against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are the metric functions. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses the `predict` method to generate predictions and computes each metric. - """ - if metrics is None: - metrics = {"Mean Squared Error": mean_squared_error} - - # Ensure input is in the correct format - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Generate predictions using the trained model - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores
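To make the interface above concrete, here is a minimal usage sketch for this estimator. It assumes `mambular` is installed and that `EmbeddingMambularRegressor` is re-exported from `mambular.models` (as the other models are in the examples further below); the synthetic data, column names, and hyperparameter values are purely illustrative.

```python
import numpy as np
import pandas as pd
from mambular.models import EmbeddingMambularRegressor

# Purely illustrative data: 500 samples with 32 numerical, embedding-like features
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(500, 32)), columns=[f"f{i}" for i in range(32)])
y = rng.normal(size=500)

# Config-related kwargs (d_model, n_layers, dropout) go to MambularConfig,
# everything else (e.g. numerical_preprocessing) to the Preprocessor.
reg = EmbeddingMambularRegressor(d_model=64, n_layers=4, dropout=0.1)

# pca=True reduces the inputs to `seq_size` principal components and
# switches the model to raw embeddings internally.
reg.fit(X, y, max_epochs=10, batch_size=64, pca=True, seq_size=16)

preds = reg.predict(X)
scores = reg.evaluate(X, y)  # defaults to mean squared error
print(scores)
```

Setting `pca=True` exercises the branch of `fit` shown above that fits a PCA on the training data and stores it as `self.pca_transformer`, so the same projection is reapplied automatically inside `predict`.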
\ No newline at end of file diff --git a/docs/_build/html/_modules/mambular/models/sklearn_regressor.html b/docs/_build/html/_modules/mambular/models/sklearn_regressor.html deleted file mode 100644 index 9257a44..0000000 --- a/docs/_build/html/_modules/mambular/models/sklearn_regressor.html +++ /dev/null @@ -1,625 +0,0 @@
Source code for mambular.models.sklearn_regressor

-from sklearn.model_selection import train_test_split
-import pytorch_lightning as pl
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from ..base_models.regressor import BaseMambularRegressor
-from ..utils.config import MambularConfig
-from ..utils.preprocessor import Preprocessor
-from ..utils.dataset import MambularDataModule, MambularDataset
-from sklearn.base import BaseEstimator
-import pandas as pd
-from sklearn.metrics import mean_squared_error
-
-
-
[docs]class MambularRegressor(BaseEstimator): - """ - A regressor implemented using PyTorch Lightning that follows the scikit-learn API conventions. This class is designed - to work with tabular data, offering a straightforward way to specify model configurations and preprocessing steps. It - integrates seamlessly with scikit-learn's tools such as cross-validation and grid search. - - Parameters - ---------- - **kwargs : Various - Accepts any number of keyword arguments. Arguments recognized as model configuration options are passed to the - MambularConfig constructor. Remaining arguments are assumed to be preprocessor options and passed to the - Preprocessor constructor. - - Attributes - ---------- - config : MambularConfig - An object storing the configuration settings for the model. - preprocessor : Preprocessor - An object responsible for preprocessing the input data, such as encoding categorical variables and scaling numerical features. - model : BaseMambularRegressor or None - The underlying regression model, which is a PyTorch Lightning module. It is instantiated when the `fit` method is called. - """ - - def __init__(self, **kwargs): - # Known config arguments - print("Received kwargs:", kwargs) - config_arg_names = [ - "d_model", - "n_layers", - "dt_rank", - "output_dimension", - "pooling_method", - "norm", - "cls", - "dt_min", - "dt_max", - "dropout", - "bias", - "weight_decay", - "conv_bias", - "d_state", - "expand_factor", - "d_conv", - "dt_init", - "dt_scale", - "dt_init_floor", - "tabular_head_units", - "tabular_head_activation", - "tabular_head_dropout", - "num_emebedding_activation", - "layer_norm_after_embedding", - ] - self.config_kwargs = {k: v for k, v in kwargs.items() if k in config_arg_names} - self.config = MambularConfig(**self.config_kwargs) - - # The rest are assumed to be preprocessor arguments - preprocessor_kwargs = { - k: v for k, v in kwargs.items() if k not in config_arg_names - } - self.preprocessor = Preprocessor(**preprocessor_kwargs) - self.model = None - - def get_params(self, deep=True): - """ - Get parameters for this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - deep : bool, default=True - If True, returns the parameters for this estimator and contained sub-objects that are estimators. - - Returns - ------- - params : dict - Parameter names mapped to their values. - """ - params = self.config_kwargs # Parameters used to initialize MambularConfig - - # If deep=True, include parameters from nested components like preprocessor - if deep: - # Assuming Preprocessor has a get_params method - preprocessor_params = { - "preprocessor__" + key: value - for key, value in self.preprocessor.get_params().items() - } - params.update(preprocessor_params) - - return params - - def set_params(self, **parameters): - """ - Set the parameters of this estimator. Overrides the BaseEstimator method. - - Parameters - ---------- - **parameters : dict - Estimator parameters to be set. - - Returns - ------- - self : object - The instance with updated parameters. 
- """ - # Update config_kwargs with provided parameters - valid_config_keys = self.config_kwargs.keys() - config_updates = {k: v for k, v in parameters.items() if k in valid_config_keys} - self.config_kwargs.update(config_updates) - - # Update the config object - for key, value in config_updates.items(): - setattr(self.config, key, value) - - # Handle preprocessor parameters (prefixed with 'preprocessor__') - preprocessor_params = { - k.split("__")[1]: v - for k, v in parameters.items() - if k.startswith("preprocessor__") - } - if preprocessor_params: - # Assuming Preprocessor has a set_params method - self.preprocessor.set_params(**preprocessor_params) - - return self - - def split_data(self, X, y, val_size, random_state): - """ - Splits the dataset into training and validation sets. - - Parameters - ---------- - X : array-like or DataFrame, shape (n_samples, n_features) - Input features. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - Target values. - val_size : float - The proportion of the dataset to include in the validation split. - random_state : int - Controls the shuffling applied to the data before applying the split. - - Returns - ------- - X_train, X_val, y_train, y_val : arrays - The split datasets. - """ - X_train, X_val, y_train, y_val = train_test_split( - X, y, test_size=val_size, random_state=random_state - ) - - return X_train, X_val, y_train, y_val - - def preprocess_data(self, X_train, y_train, X_val, y_val, batch_size, shuffle): - """ - Preprocesses the training and validation data, and creates DataLoaders for them. - - Parameters - ---------- - X_train : DataFrame or array-like, shape (n_samples_train, n_features) - Training feature set. - y_train : array-like, shape (n_samples_train,) - Training target values. - X_val : DataFrame or array-like, shape (n_samples_val, n_features) - Validation feature set. - y_val : array-like, shape (n_samples_val,) - Validation target values. - batch_size : int - Size of batches for the DataLoader. - shuffle : bool - Whether to shuffle the training data in the DataLoader. - - Returns - ------- - data_module : MambularDataModule - An instance of MambularDataModule containing the training and validation DataLoaders. 
- """ - train_preprocessed_data = self.preprocessor.fit_transform(X_train, y_train) - val_preprocessed_data = self.preprocessor.transform(X_val) - - # Update feature info based on the actual processed data - ( - self.cat_feature_info, - self.num_feature_info, - ) = self.preprocessor.get_feature_info() - - # Initialize lists for tensors - train_cat_tensors = [] - train_num_tensors = [] - val_cat_tensors = [] - val_num_tensors = [] - - # Populate tensors for categorical features, if present in processed data - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[cat_key], dtype=torch.long) - ) - if cat_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in train_preprocessed_data: - train_cat_tensors.append( - torch.tensor(train_preprocessed_data[binned_key], dtype=torch.long) - ) - - if binned_key in val_preprocessed_data: - val_cat_tensors.append( - torch.tensor(val_preprocessed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features, if present in processed data - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in train_preprocessed_data: - train_num_tensors.append( - torch.tensor(train_preprocessed_data[num_key], dtype=torch.float) - ) - if num_key in val_preprocessed_data: - val_num_tensors.append( - torch.tensor(val_preprocessed_data[num_key], dtype=torch.float) - ) - - train_labels = torch.tensor(y_train, dtype=torch.float) - val_labels = torch.tensor(y_val, dtype=torch.float) - - # Create datasets - train_dataset = MambularDataset( - train_cat_tensors, train_num_tensors, train_labels - ) - val_dataset = MambularDataset(val_cat_tensors, val_num_tensors, val_labels) - - # Create dataloaders - train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=shuffle - ) - val_dataloader = DataLoader(val_dataset, batch_size=batch_size) - - return MambularDataModule(train_dataloader, val_dataloader) - - def preprocess_test_data(self, X): - """ - Preprocesses the test data and creates tensors for categorical and numerical features. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - Test feature set. - - Returns - ------- - cat_tensors : list of Tensors - List of tensors for each categorical feature. - num_tensors : list of Tensors - List of tensors for each numerical feature. 
- """ - processed_data = self.preprocessor.transform(X) - - # Initialize lists for tensors - cat_tensors = [] - num_tensors = [] - - # Populate tensors for categorical features - for key in self.cat_feature_info: - cat_key = "cat_" + key # Assuming categorical keys are prefixed with 'cat_' - if cat_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[cat_key], dtype=torch.long) - ) - - binned_key = "num_" + key # for binned features - if binned_key in processed_data: - cat_tensors.append( - torch.tensor(processed_data[binned_key], dtype=torch.long) - ) - - # Populate tensors for numerical features - for key in self.num_feature_info: - num_key = "num_" + key # Assuming numerical keys are prefixed with 'num_' - if num_key in processed_data: - num_tensors.append( - torch.tensor(processed_data[num_key], dtype=torch.float) - ) - - return cat_tensors, num_tensors - - def fit( - self, - X, - y, - val_size=0.2, - X_val=None, - y_val=None, - max_epochs=100, - random_state=101, - batch_size=128, - shuffle=True, - patience=10, - monitor="val_loss", - mode="min", - lr=1e-3, - lr_patience=5, - factor=0.75, - weight_decay=1e-06, - **trainer_kwargs - ): - """ - Trains the regression model using the provided training data. Optionally, a separate validation set can be used. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The training input samples. - y : array-like, shape (n_samples,) or (n_samples, n_targets) - The target values (real numbers). - val_size : float, default=0.2 - The proportion of the dataset to include in the validation split if `X_val` is None. Ignored if `X_val` is provided. - X_val : DataFrame or array-like, shape (n_samples, n_features), optional - The validation input samples. If provided, `X` and `y` are not split and this data is used for validation. - y_val : array-like, shape (n_samples,) or (n_samples, n_targets), optional - The validation target values. Required if `X_val` is provided. - max_epochs : int, default=100 - Maximum number of epochs for training. - random_state : int, default=101 - Controls the shuffling applied to the data before applying the split. - batch_size : int, default=64 - Number of samples per gradient update. - shuffle : bool, default=True - Whether to shuffle the training data before each epoch. - patience : int, default=10 - Number of epochs with no improvement on the validation loss to wait before early stopping. - monitor : str, default="val_loss" - The metric to monitor for early stopping. - mode : str, default="min" - Whether the monitored metric should be minimized (`min`) or maximized (`max`). - lr : float, default=1e-3 - Learning rate for the optimizer. - lr_patience : int, default=10 - Number of epochs with no improvement on the validation loss to wait before reducing the learning rate. - factor : float, default=0.75 - Factor by which the learning rate will be reduced. - weight_decay : float, default=0.025 - Weight decay (L2 penalty) coefficient. - **trainer_kwargs : Additional keyword arguments for PyTorch Lightning's Trainer class. - - Returns - ------- - self : object - The fitted regressor. 
- """ - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - if X_val: - if not isinstance(X_val, pd.DataFrame): - X_val = pd.DataFrame(X_val) - - if not X_val: - X_train, X_val, y_train, y_val = self.split_data( - X, y, val_size, random_state - ) - - data_module = self.preprocess_data( - X_train, y_train, X_val, y_val, batch_size, shuffle - ) - - self.model = BaseMambularRegressor( - config=self.config, - cat_feature_info=self.cat_feature_info, - num_feature_info=self.num_feature_info, - lr=lr, - lr_patience=lr_patience, - lr_factor=factor, - weight_decay=weight_decay, - ) - - early_stop_callback = EarlyStopping( - monitor=monitor, min_delta=0.00, patience=patience, verbose=False, mode=mode - ) - - checkpoint_callback = ModelCheckpoint( - monitor="val_loss", # Adjust according to your validation metric - mode="min", - save_top_k=1, - dirpath="model_checkpoints", # Specify the directory to save checkpoints - filename="best_model", - ) - - # Initialize the trainer and train the model - trainer = pl.Trainer( - max_epochs=max_epochs, - callbacks=[early_stop_callback, checkpoint_callback], - **trainer_kwargs - ) - trainer.fit(self.model, data_module) - - best_model_path = checkpoint_callback.best_model_path - if best_model_path: - checkpoint = torch.load(best_model_path) - self.model.load_state_dict(checkpoint["state_dict"]) - - return self - - def predict(self, X): - """ - Predicts target values for the given input samples. - - Parameters - ---------- - X : DataFrame or array-like, shape (n_samples, n_features) - The input samples for which to predict target values. - - Returns - ------- - predictions : ndarray, shape (n_samples,) or (n_samples, n_outputs) - The predicted target values. - """ - # Preprocess the data - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - device = next(self.model.parameters()).device - cat_tensors, num_tensors = self.preprocess_test_data(X) - if isinstance(cat_tensors, list): - cat_tensors = [tensor.to(device) for tensor in cat_tensors] - else: - cat_tensors = cat_tensors.to(device) - - if isinstance(num_tensors, list): - num_tensors = [tensor.to(device) for tensor in num_tensors] - else: - num_tensors = num_tensors.to(device) - - # Set the model to evaluation mode - self.model.eval() - - # Perform inference - with torch.no_grad(): - predictions = self.model(cat_tensors, num_tensors) - - # Convert predictions to NumPy array and return - return predictions.cpu().numpy() - - def evaluate(self, X, y_true, metrics=None): - """ - Evaluate the model on the given data using specified metrics. - - Example: - metrics = { - 'Mean Squared Error': mean_squared_error, - 'R2 Score': r2_score - } - - # Assuming 'X_test' and 'y_test' are your test dataset and labels - # Evaluate using the specified metrics - results = regressor.evaluate(X_test, y_test, metrics=metrics) - - Parameters - ---------- - X : array-like or pd.DataFrame of shape (n_samples, n_features) - The input samples to predict. - y_true : array-like of shape (n_samples,) or (n_samples, n_outputs) - The true target values against which to evaluate the predictions. - metrics : dict - A dictionary where keys are metric names and values are the metric functions. - - Returns - ------- - scores : dict - A dictionary with metric names as keys and their corresponding scores as values. - - Notes - ----- - This method uses the `predict` method to generate predictions and computes each metric. 
- """ - if metrics is None: - metrics = {"Mean Squared Error": mean_squared_error} - - # Ensure input is in the correct format - if not isinstance(X, pd.DataFrame): - X = pd.DataFrame(X) - - # Generate predictions using the trained model - predictions = self.predict(X) - - # Initialize dictionary to store results - scores = {} - - # Compute each metric - for metric_name, metric_func in metrics.items(): - scores[metric_name] = metric_func(y_true, predictions) - - return scores
- - - - \ No newline at end of file diff --git a/docs/_build/html/_sources/api/base_models/BaseModels.rst.txt b/docs/_build/html/_sources/api/base_models/BaseModels.rst.txt deleted file mode 100644 index 9d20f8b..0000000 --- a/docs/_build/html/_sources/api/base_models/BaseModels.rst.txt +++ /dev/null @@ -1,23 +0,0 @@ -Base Models -=========== - -.. autoclass:: mambular.base_models.classifier.BaseMambularClassifier - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.distributional.BaseMambularLSS - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor - :members: - :undoc-members: - :no-inherited-members: -.. autoclass:: mambular.base_models.regressor.BaseMambularRegressor - :members: - :undoc-members: - :no-inherited-members: diff --git a/docs/_build/html/_sources/api/base_models/index.rst.txt b/docs/_build/html/_sources/api/base_models/index.rst.txt deleted file mode 100644 index 3aae198..0000000 --- a/docs/_build/html/_sources/api/base_models/index.rst.txt +++ /dev/null @@ -1,28 +0,0 @@ -.. -*- mode: rst -*- - -.. currentmodule:: mambular.base_models - -BaseModels -========== - -This module provides base classes for the Mambular models. - -========================================= =================================================================================================== -Functionality Description -========================================= =================================================================================================== -:class:`BaseMambularClassifier` Multi-class and binary classification tasks. -:class:`BaseMambularLSS` Various statistical distribution families for different types of regression and classification tasks. -:class:`Base EmbeddingMambularClassifier` Specialized classification module for complex protein sequence data. -:class:`BaseEmbeddingMambularRegressor` Specialized regression module for complex protein sequence data. -:class:`BaseMambularRegressor` Regression tasks. -========================================= =================================================================================================== - - -.. toctree:: - :maxdepth: 1 - :hidden: - - BaseModels - - - diff --git a/docs/_build/html/_sources/api/models/Models.rst.txt b/docs/_build/html/_sources/api/models/Models.rst.txt deleted file mode 100644 index 017a589..0000000 --- a/docs/_build/html/_sources/api/models/Models.rst.txt +++ /dev/null @@ -1,8 +0,0 @@ -Models -=========== - -.. autoclass:: mambular.models.sklearn_classifier.MambularClassifier -.. autoclass:: mambular.models.sklearn_distributional.MambularLSS -.. autoclass:: mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier -.. autoclass:: mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor -.. autoclass:: mambular.models.sklearn_regressor.MambularRegressor diff --git a/docs/_build/html/_sources/api/models/index.rst.txt b/docs/_build/html/_sources/api/models/index.rst.txt deleted file mode 100644 index a02df09..0000000 --- a/docs/_build/html/_sources/api/models/index.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -.. -*- mode: rst -*- - -.. currentmodule:: mambular.models - -Models -====== - -This module provides classes for the Mambular models that adhere to scikit-learn's `BaseEstimator` interface. 
- -======================================= ===================================================================================================== -Functionality Description -======================================= ===================================================================================================== -:class:`MambularClassifier` Multi-class and binary classification tasks. -:class:`MambularLSS` Various statistical distribution families for different types of regression and classification tasks. -:class:`EmbeddingMambularClassifier` Specialized classification module for complex protein sequence data. -:class:`EmbeddingMambularRegressor` Specialized regression module for complex protein sequence data. -:class:`MambularRegressor` Regression tasks. -======================================= ===================================================================================================== - -.. toctree:: - :maxdepth: 1 - :hidden: - - Models - diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt deleted file mode 100644 index 779f49e..0000000 --- a/docs/_build/html/_sources/index.rst.txt +++ /dev/null @@ -1,27 +0,0 @@ -.. mamba-tabular documentation master file, created by - sphinx-quickstart on Mon May 6 16:16:57 2024. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -.. mdinclude:: mambular.md - -.. toctree:: - :name: Getting Started - :caption: Getting Started - :maxdepth: 2 - :hidden: - - mamba - installation - quickstart - -.. toctree:: - :name: API Docs - :caption: API Docs - :maxdepth: 1 - :hidden: - - - api/models/index - api/base_models/index - diff --git a/docs/_build/html/_sources/installation.md.txt b/docs/_build/html/_sources/installation.md.txt deleted file mode 100644 index d3e36df..0000000 --- a/docs/_build/html/_sources/installation.md.txt +++ /dev/null @@ -1,14 +0,0 @@ -# Installation - -Please follow the steps below for installing `mambular` - -Install from the source: - -```bash -cd mamba-tabular -pip install . -``` - -Note: Make sure you in the same directory where `setup.py` file resides. - -This package is so far not available in PyPi. diff --git a/docs/_build/html/_sources/mambular.md.txt b/docs/_build/html/_sources/mambular.md.txt deleted file mode 100644 index 7c5b354..0000000 --- a/docs/_build/html/_sources/mambular.md.txt +++ /dev/null @@ -1,140 +0,0 @@ -# Mambular: Tabular Deep Learning with Mamba Architectures - -Mambular is a Python package that brings the power of Mamba architectures to tabular data, offering a suite of deep learning models for regression, classification, and distributional regression tasks. Designed with ease of use in mind, Mambular models adhere to scikit-learn's `BaseEstimator` interface, making them highly compatible with the familiar scikit-learn ecosystem. This means you can fit, predict, and transform using Mambular models just as you would with any traditional scikit-learn model, but with the added performance and flexibility of deep learning. - -## Features - -- **Comprehensive Model Suite**: Includes modules for regression (`MambularRegressor`), classification (`MambularClassifier`), and distributional regression (`MambularLSS`), catering to a wide range of tabular data tasks. -- **State-of-the-Art Architectures**: Leverages the Mamba architecture, known for its effectiveness in handling sequential and time-series data within a state-space modeling framework, adapted here for tabular data. 
-- **Seamless Integration**: Designed to work effortlessly with scikit-learn, allowing for easy inclusion in existing machine learning pipelines, cross-validation, and hyperparameter tuning workflows. -- **Extensive Preprocessing**: Comes with a powerful preprocessing module that supports a broad array of data transformation techniques, ensuring that your data is optimally prepared for model training. -- **Sklearn-like API**: The familiar scikit-learn `fit`, `predict`, and `predict_proba` methods mean minimal learning curve for those already accustomed to scikit-learn. -- **PyTorch Lightning Under the Hood**: Built on top of PyTorch Lightning, Mambular models benefit from streamlined training processes, easy customization, and advanced features like distributed training and 16-bit precision. - -## Documentation - -You can find the Mamba-Tabular API documentation [here](https://mamba-tabular.readthedocs.io/en/latest/index.html). - -## Installation - -Install Mambular using pip: -```sh -pip install mambular -``` - -## Preprocessing - -Mambular elevates the preprocessing stage of model development, employing a sophisticated suite of techniques to ensure your data is in the best shape for the Mamba architectures. Our preprocessing module is designed to be both powerful and intuitive, offering a range of options to transform your tabular data efficiently. - -### Data Type Detection and Transformation - -Mambular automatically identifies the type of each feature in your dataset, applying the most suitable transformations to numerical and categorical variables. This includes: - -- **Ordinal Encoding**: Categorical features are seamlessly transformed into numerical values, preserving their inherent order and making them model-ready. -- **One-Hot Encoding**: For nominal data, Mambular employs one-hot encoding to capture the presence or absence of categories without imposing ordinality. -- **Binning**: Numerical features can be discretized into bins, a useful technique for handling continuous variables in certain modeling contexts. -- **Decision Tree Binning**: Optionally, Mambular can use decision trees to find the optimal binning strategy for numerical features, enhancing model interpretability and performance. -- **Normalization**: Mambular can easily handle numerical features without specifically turning them into categorical features. Standard preprocessing steps such as normalization per feature are possible -- **Standardization**: Similarly, Standardization instead of Normalization can be used. - - -### Handling Missing Values - -Our preprocessing pipeline gracefully handles missing data, employing strategies like mean imputation for numerical features and mode imputation for categorical ones, ensuring that your models receive complete data inputs without manual intervention. - -### Flexible and Customizable - -While Mambular excels in automating the preprocessing workflow, it also offers flexibility. You can customize the preprocessing steps to fit the unique needs of your dataset, ensuring that you're not locked into a one-size-fits-all approach. - -By integrating Mambular's preprocessing module into your workflow, you're not just preparing your data for deep learning; you're optimizing it for excellence. This commitment to data quality is what sets Mambular apart, making it an indispensable tool in your machine learning arsenal. - - -## Fit a Model -Fitting a model in mambular is as simple as it gets. All models in mambular are sklearn BaseEstimators. 
Thus the `.fit` method is implemented for all of them. Additionally, this allows for using all other sklearn inherent methods such as their built in hyperparameter optimization tools. - -```python -from mambular.models import MambularClassifier -# Initialize and fit your model -model = MambularClassifier( - dropout=0.01, - d_model=128, - n_layers=6, - numerical_preprocessing="normalization", -) - -# X can be a dataframe or something that can be easily transformed into a pd.DataFrame as a np.array -model.fit(X, y, max_epochs=500, lr=1e-03, patience=25) -``` - -Predictions are also easily obtained: -```python -# simple predictions -preds = model.predict(X) - -# Predict probabilities -preds = model.predict_proba(X) -``` - - -## Distributional Regression with MambularLSS - -Mambular introduces a cutting-edge approach to distributional regression through its `MambularLSS` module, empowering users to model the full distribution of a response variable, not just its mean. This method is particularly valuable in scenarios where understanding the variability, skewness, or kurtosis of the response distribution is as crucial as predicting its central tendency. - -### Key Features of MambularLSS: - -- **Full Distribution Modeling**: Unlike traditional regression models that predict a single value (e.g., the mean), `MambularLSS` models the entire distribution of the response variable. This allows for more informative predictions, including quantiles, variance, and higher moments. -- **Customizable Distribution Types**: `MambularLSS` supports a variety of distribution families (e.g., Gaussian, Poisson, Binomial), making it adaptable to different types of response variables, from continuous to count data. -- **Location, Scale, Shape Parameters**: The model predicts parameters corresponding to the location, scale, and shape of the distribution, offering a nuanced understanding of the data's underlying distributional characteristics. -- **Enhanced Predictive Uncertainty**: By modeling the full distribution, `MambularLSS` provides richer information on predictive uncertainty, enabling more robust decision-making processes in uncertain environments. - - -### Available Distribution Classes: - -`MambularLSS` offers a wide range of distribution classes to cater to various statistical modeling needs. The available distribution classes include: - -- `normal`: Normal Distribution for modeling continuous data with a symmetric distribution around the mean. -- `poisson`: Poisson Distribution for modeling count data that for instance represent the number of events occurring within a fixed interval. -- `gamma`: Gamma Distribution for modeling continuous data that is skewed and bounded at zero, often used for waiting times. -- `beta`: Beta Distribution for modeling data that is bounded between 0 and 1, useful for proportions and percentages. -- `dirichlet`: Dirichlet Distribution for modeling multivariate data where individual components are correlated, and the sum is constrained to 1. -- `studentt`: Student's T-Distribution for modeling data with heavier tails than the normal distribution, useful when the sample size is small. -- `negativebinom`: Negative Binomial Distribution for modeling count data with over-dispersion relative to the Poisson distribution. -- `inversegamma`: Inverse Gamma Distribution, often used as a prior distribution in Bayesian inference for scale parameters. -- `categorical`: Categorical Distribution for modeling categorical data with more than two categories. 
- -These distribution classes allow `MambularLSS` to flexibly model a wide variety of data types and distributions, providing users with the tools needed to capture the full complexity of their data. - - -### Use Cases for MambularLSS: - -- **Risk Assessment**: In finance or insurance, understanding the range and likelihood of potential losses is as important as predicting average outcomes. -- **Demand Forecasting**: For inventory management, capturing the variability in product demand helps in optimizing stock levels. -- **Personalized Medicine**: In healthcare, distributional regression can predict a range of possible patient responses to a treatment, aiding in personalized therapy planning. - -### Getting Started with MambularLSS: - -To integrate distributional regression into your workflow with `MambularLSS`, start by initializing the model with your desired configuration, similar to other Mambular models: - -```python -from mambular.models import MambularLSS - -# Initialize the MambularLSS model -model = MambularLSS( - dropout=0.2, - d_model=256, - n_layers=4, - -) - -# Fit the model to your data -model.fit( - X, - y, - max_epochs=300, - lr=1e-03, - patience=10, - family="normal" # define your distribution - ) - -``` - diff --git a/docs/_build/html/_sources/quickstart.md.txt b/docs/_build/html/_sources/quickstart.md.txt deleted file mode 100644 index 5004232..0000000 --- a/docs/_build/html/_sources/quickstart.md.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Quickstart - -Example code for implementing the models: - -1. [Classification](https://github.com/basf/mamba-tabular/blob/master/example_classification.py) -2. [Distributional](https://github.com/basf/mamba-tabular/blob/master/example_distributional.py) -3. [Embedding Regression](https://github.com/basf/mamba-tabular/blob/master/example_embedding_regression.py) -4. [Regression](https://github.com/basf/mamba-tabular/blob/master/example_regression.py) -
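For a copy-paste starting point before opening the linked scripts, a minimal classification sketch in the same spirit is shown below. The data is synthetic, the hyperparameter values are illustrative, and the import follows the examples above.

```python
import numpy as np
import pandas as pd
from mambular.models import MambularClassifier

# Synthetic binary classification data
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(500, 8)), columns=[f"x{i}" for i in range(8)])
y = (X["x0"] + X["x1"] > 0).astype(int).to_numpy()

clf = MambularClassifier(d_model=64, n_layers=4, numerical_preprocessing="normalization")
clf.fit(X, y, max_epochs=10, patience=5)

labels = clf.predict(X)        # predicted class labels
proba = clf.predict_proba(X)   # predicted class probabilities
```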
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css deleted file mode 100644 index 7577acb..0000000 --- a/docs/_build/html/_static/basic.css +++ /dev/null @@ -1,903 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li p.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - 
-div.body { - min-width: 360px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, figure.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, figure.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, figure.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, figure.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar, -aside.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} - -nav.contents, -aside.topic, -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ - -nav.contents, -aside.topic, -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -aside.sidebar > :last-child, -nav.contents > :last-child, -aside.topic > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -aside.sidebar::after, -nav.contents::after, -aside.topic::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - 
border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure, figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption, figcaption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number, -figcaption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text, -figcaption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - -/* -- object description styles --------------------------------------------- */ - -.sig { - font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; -} - -.sig-name, code.descname { - background-color: transparent; - font-weight: bold; -} - -.sig-name { - font-size: 1.1em; -} - -code.descname { - font-size: 1.2em; -} - -.sig-prename, code.descclassname { - background-color: transparent; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.sig-param.n { - font-style: italic; -} - -/* C++ specific styling */ - -.sig-inline.c-texpr, -.sig-inline.cpp-texpr { - font-family: unset; -} - -.sig.c .k, .sig.c .kt, -.sig.cpp .k, .sig.cpp .kt { - color: #0033B3; -} - -.sig.c .m, -.sig.cpp .m { - color: #1750EB; -} - -.sig.c .s, .sig.c .sc, -.sig.cpp .s, .sig.cpp .sc { - color: #067D17; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -aside.footnote > span, -div.citation > span { - float: left; -} -aside.footnote > span:last-of-type, -div.citation > span:last-of-type { - padding-right: 0.5em; -} -aside.footnote > p { - margin-left: 2em; -} -div.citation > p { - margin-left: 4em; -} -aside.footnote > p:last-of-type, -div.citation > p:last-of-type { - margin-bottom: 0em; -} -aside.footnote > p:last-of-type:after, -div.citation > p:last-of-type:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; 
- word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0 0.5em; - content: ":"; - display: inline-block; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class*="highlight-"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; - white-space: nowrap; -} - -div[class*="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -span.linenos, -div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; - -webkit-user-select: text; /* Safari fallback only */ - -webkit-user-select: none; /* Chrome/Safari */ - -moz-user-select: none; /* Firefox */ - -ms-user-select: none; /* IE10+ */ -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - 
-span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/html/_static/check-solid.svg b/docs/_build/html/_static/check-solid.svg deleted file mode 100644 index 92fad4b..0000000 --- a/docs/_build/html/_static/check-solid.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/docs/_build/html/_static/clipboard.min.js b/docs/_build/html/_static/clipboard.min.js deleted file mode 100644 index 54b3c46..0000000 --- a/docs/_build/html/_static/clipboard.min.js +++ /dev/null @@ -1,7 +0,0 @@ -/*! - * clipboard.js v2.0.8 - * https://clipboardjs.com/ - * - * Licensed MIT © Zeno Rocha - */ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 - - - - diff --git a/docs/_build/html/_static/copybutton.css b/docs/_build/html/_static/copybutton.css deleted file mode 100644 index f1916ec..0000000 --- a/docs/_build/html/_static/copybutton.css +++ /dev/null @@ -1,94 +0,0 @@ -/* Copy buttons */ -button.copybtn { - position: absolute; - display: flex; - top: .3em; - right: .3em; - width: 1.7em; - height: 1.7em; - opacity: 0; - transition: opacity 0.3s, border .3s, background-color .3s; - user-select: none; - padding: 0; - border: none; - outline: none; - border-radius: 0.4em; - /* The colors that GitHub uses */ - border: #1b1f2426 1px solid; - background-color: #f6f8fa; - color: #57606a; -} - -button.copybtn.success { - border-color: #22863a; - color: #22863a; -} - -button.copybtn svg { - stroke: currentColor; - width: 1.5em; - height: 1.5em; - padding: 0.1em; -} - -div.highlight { - position: relative; -} - -/* Show the copybutton */ -.highlight:hover button.copybtn, button.copybtn.success { - opacity: 1; -} - -.highlight button.copybtn:hover { - background-color: rgb(235, 235, 235); -} - -.highlight button.copybtn:active { - background-color: rgb(187, 187, 187); -} - -/** - * A minimal CSS-only tooltip copied from: - * https://codepen.io/mildrenben/pen/rVBrpK - * - * To use, write HTML like the following: - * - *

<p class="o-tooltip--left" data-tooltip="Hey">Short</p>

- */ - .o-tooltip--left { - position: relative; - } - - .o-tooltip--left:after { - opacity: 0; - visibility: hidden; - position: absolute; - content: attr(data-tooltip); - padding: .2em; - font-size: .8em; - left: -.2em; - background: grey; - color: white; - white-space: nowrap; - z-index: 2; - border-radius: 2px; - transform: translateX(-102%) translateY(0); - transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); -} - -.o-tooltip--left:hover:after { - display: block; - opacity: 1; - visibility: visible; - transform: translateX(-100%) translateY(0); - transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); - transition-delay: .5s; -} - -/* By default the copy button shouldn't show up when printing a page */ -@media print { - button.copybtn { - display: none; - } -} diff --git a/docs/_build/html/_static/copybutton.js b/docs/_build/html/_static/copybutton.js deleted file mode 100644 index 2ea7ff3..0000000 --- a/docs/_build/html/_static/copybutton.js +++ /dev/null @@ -1,248 +0,0 @@ -// Localization support -const messages = { - 'en': { - 'copy': 'Copy', - 'copy_to_clipboard': 'Copy to clipboard', - 'copy_success': 'Copied!', - 'copy_failure': 'Failed to copy', - }, - 'es' : { - 'copy': 'Copiar', - 'copy_to_clipboard': 'Copiar al portapapeles', - 'copy_success': '¡Copiado!', - 'copy_failure': 'Error al copiar', - }, - 'de' : { - 'copy': 'Kopieren', - 'copy_to_clipboard': 'In die Zwischenablage kopieren', - 'copy_success': 'Kopiert!', - 'copy_failure': 'Fehler beim Kopieren', - }, - 'fr' : { - 'copy': 'Copier', - 'copy_to_clipboard': 'Copier dans le presse-papier', - 'copy_success': 'Copié !', - 'copy_failure': 'Échec de la copie', - }, - 'ru': { - 'copy': 'Скопировать', - 'copy_to_clipboard': 'Скопировать в буфер', - 'copy_success': 'Скопировано!', - 'copy_failure': 'Не удалось скопировать', - }, - 'zh-CN': { - 'copy': '复制', - 'copy_to_clipboard': '复制到剪贴板', - 'copy_success': '复制成功!', - 'copy_failure': '复制失败', - }, - 'it' : { - 'copy': 'Copiare', - 'copy_to_clipboard': 'Copiato negli appunti', - 'copy_success': 'Copiato!', - 'copy_failure': 'Errore durante la copia', - } -} - -let locale = 'en' -if( document.documentElement.lang !== undefined - && messages[document.documentElement.lang] !== undefined ) { - locale = document.documentElement.lang -} - -let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; -if (doc_url_root == '#') { - doc_url_root = ''; -} - -/** - * SVG files for our copy buttons - */ -let iconCheck = ` - ${messages[locale]['copy_success']} - - -` - -// If the user specified their own SVG use that, otherwise use the default -let iconCopy = ``; -if (!iconCopy) { - iconCopy = ` - ${messages[locale]['copy_to_clipboard']} - - - -` -} - -/** - * Set up copy/paste for code blocks - */ - -const runWhenDOMLoaded = cb => { - if (document.readyState != 'loading') { - cb() - } else if (document.addEventListener) { - document.addEventListener('DOMContentLoaded', cb) - } else { - document.attachEvent('onreadystatechange', function() { - if (document.readyState == 'complete') cb() - }) - } -} - -const codeCellId = index => `codecell${index}` - -// Clears selected text since ClipboardJS will select the text when copying -const clearSelection = () => { - if (window.getSelection) { - window.getSelection().removeAllRanges() - } else if (document.selection) { - document.selection.empty() - } -} - -// Changes tooltip text for a moment, then changes it back -// We want the timeout of our 
`success` class to be a bit shorter than the -// tooltip and icon change, so that we can hide the icon before changing back. -var timeoutIcon = 2000; -var timeoutSuccessClass = 1500; - -const temporarilyChangeTooltip = (el, oldText, newText) => { - el.setAttribute('data-tooltip', newText) - el.classList.add('success') - // Remove success a little bit sooner than we change the tooltip - // So that we can use CSS to hide the copybutton first - setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) - setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) -} - -// Changes the copy button icon for two seconds, then changes it back -const temporarilyChangeIcon = (el) => { - el.innerHTML = iconCheck; - setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) -} - -const addCopyButtonToCodeCells = () => { - // If ClipboardJS hasn't loaded, wait a bit and try again. This - // happens because we load ClipboardJS asynchronously. - if (window.ClipboardJS === undefined) { - setTimeout(addCopyButtonToCodeCells, 250) - return - } - - // Add copybuttons to all of our code cells - const COPYBUTTON_SELECTOR = 'div.highlight pre'; - const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) - codeCells.forEach((codeCell, index) => { - const id = codeCellId(index) - codeCell.setAttribute('id', id) - - const clipboardButton = id => - `` - codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) - }) - -function escapeRegExp(string) { - return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string -} - -/** - * Removes excluded text from a Node. - * - * @param {Node} target Node to filter. - * @param {string} exclude CSS selector of nodes to exclude. - * @returns {DOMString} Text from `target` with text removed. - */ -function filterText(target, exclude) { - const clone = target.cloneNode(true); // clone as to not modify the live DOM - if (exclude) { - // remove excluded nodes - clone.querySelectorAll(exclude).forEach(node => node.remove()); - } - return clone.innerText; -} - -// Callback when a copy button is clicked. Will be passed the node that was clicked -// should then grab the text and replace pieces of text that shouldn't be used in output -function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { - var regexp; - var match; - - // Do we check for line continuation characters and "HERE-documents"? 
- var useLineCont = !!lineContinuationChar - var useHereDoc = !!hereDocDelim - - // create regexp to capture prompt and remaining line - if (isRegexp) { - regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') - } else { - regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') - } - - const outputLines = []; - var promptFound = false; - var gotLineCont = false; - var gotHereDoc = false; - const lineGotPrompt = []; - for (const line of textContent.split('\n')) { - match = line.match(regexp) - if (match || gotLineCont || gotHereDoc) { - promptFound = regexp.test(line) - lineGotPrompt.push(promptFound) - if (removePrompts && promptFound) { - outputLines.push(match[2]) - } else { - outputLines.push(line) - } - gotLineCont = line.endsWith(lineContinuationChar) & useLineCont - if (line.includes(hereDocDelim) & useHereDoc) - gotHereDoc = !gotHereDoc - } else if (!onlyCopyPromptLines) { - outputLines.push(line) - } else if (copyEmptyLines && line.trim() === '') { - outputLines.push(line) - } - } - - // If no lines with the prompt were found then just use original lines - if (lineGotPrompt.some(v => v === true)) { - textContent = outputLines.join('\n'); - } - - // Remove a trailing newline to avoid auto-running when pasting - if (textContent.endsWith("\n")) { - textContent = textContent.slice(0, -1) - } - return textContent -} - - -var copyTargetText = (trigger) => { - var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); - - // get filtered text - let exclude = '.linenos'; - - let text = filterText(target, exclude); - return formatCopyText(text, '', false, true, true, true, '', '') -} - - // Initialize with a callback so we can modify the text before copy - const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) - - // Update UI with error/success messages - clipboard.on('success', event => { - clearSelection() - temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) - temporarilyChangeIcon(event.trigger) - }) - - clipboard.on('error', event => { - temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) - }) -} - -runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/docs/_build/html/_static/copybutton_funcs.js b/docs/_build/html/_static/copybutton_funcs.js deleted file mode 100644 index dbe1aaa..0000000 --- a/docs/_build/html/_static/copybutton_funcs.js +++ /dev/null @@ -1,73 +0,0 @@ -function escapeRegExp(string) { - return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string -} - -/** - * Removes excluded text from a Node. - * - * @param {Node} target Node to filter. - * @param {string} exclude CSS selector of nodes to exclude. - * @returns {DOMString} Text from `target` with text removed. - */ -export function filterText(target, exclude) { - const clone = target.cloneNode(true); // clone as to not modify the live DOM - if (exclude) { - // remove excluded nodes - clone.querySelectorAll(exclude).forEach(node => node.remove()); - } - return clone.innerText; -} - -// Callback when a copy button is clicked. 
Will be passed the node that was clicked -// should then grab the text and replace pieces of text that shouldn't be used in output -export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { - var regexp; - var match; - - // Do we check for line continuation characters and "HERE-documents"? - var useLineCont = !!lineContinuationChar - var useHereDoc = !!hereDocDelim - - // create regexp to capture prompt and remaining line - if (isRegexp) { - regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') - } else { - regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') - } - - const outputLines = []; - var promptFound = false; - var gotLineCont = false; - var gotHereDoc = false; - const lineGotPrompt = []; - for (const line of textContent.split('\n')) { - match = line.match(regexp) - if (match || gotLineCont || gotHereDoc) { - promptFound = regexp.test(line) - lineGotPrompt.push(promptFound) - if (removePrompts && promptFound) { - outputLines.push(match[2]) - } else { - outputLines.push(line) - } - gotLineCont = line.endsWith(lineContinuationChar) & useLineCont - if (line.includes(hereDocDelim) & useHereDoc) - gotHereDoc = !gotHereDoc - } else if (!onlyCopyPromptLines) { - outputLines.push(line) - } else if (copyEmptyLines && line.trim() === '') { - outputLines.push(line) - } - } - - // If no lines with the prompt were found then just use original lines - if (lineGotPrompt.some(v => v === true)) { - textContent = outputLines.join('\n'); - } - - // Remove a trailing newline to avoid auto-running when pasting - if (textContent.endsWith("\n")) { - textContent = textContent.slice(0, -1) - } - return textContent -} diff --git a/docs/_build/html/_static/css/badge_only.css b/docs/_build/html/_static/css/badge_only.css deleted file mode 100644 index c718cee..0000000 --- a/docs/_build/html/_static/css/badge_only.css +++ /dev/null @@ -1 +0,0 @@ -.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions 
.rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb6000..0000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e23..0000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff deleted file mode 100644 index f815f63..0000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5..0000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca..0000000 Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845..0000000 --- a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ - - - - -Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 - By ,,, -Copyright Dave Gandy 2016. All rights reserved. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2..0000000 Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff deleted file mode 100644 index 400014a..0000000 Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc6..0000000 Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff deleted file mode 100644 index 88ad05b..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 deleted file mode 100644 index c4e3d80..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff b/docs/_build/html/_static/css/fonts/lato-bold.woff deleted file mode 100644 index c6dff51..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff2 b/docs/_build/html/_static/css/fonts/lato-bold.woff2 deleted file mode 100644 index bb19504..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff deleted file mode 100644 index 76114bc..0000000 Binary files 
a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff b/docs/_build/html/_static/css/fonts/lato-normal.woff deleted file mode 100644 index ae1307f..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff2 b/docs/_build/html/_static/css/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf9843..0000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css deleted file mode 100644 index 19a446a..0000000 --- a/docs/_build/html/_static/css/theme.css +++ /dev/null @@ -1,4 +0,0 @@ -html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media 
print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
- * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-b
ell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-register
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js deleted file mode 100644 index d06a71d..0000000 --- a/docs/_build/html/_static/doctools.js +++ /dev/null @@ -1,156 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Base JavaScript utilities for all Sphinx HTML documentation. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ - "TEXTAREA", - "INPUT", - "SELECT", - "BUTTON", -]); - -const _ready = (callback) => { - if (document.readyState !== "loading") { - callback(); - } else { - document.addEventListener("DOMContentLoaded", callback); - } -}; - -/** - * Small JavaScript module for the documentation. 
- */ -const Documentation = { - init: () => { - Documentation.initDomainIndexTable(); - Documentation.initOnKeyListeners(); - }, - - /** - * i18n support - */ - TRANSLATIONS: {}, - PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), - LOCALE: "unknown", - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext: (string) => { - const translated = Documentation.TRANSLATIONS[string]; - switch (typeof translated) { - case "undefined": - return string; // no translation - case "string": - return translated; // translation exists - default: - return translated[0]; // (singular, plural) translation tuple exists - } - }, - - ngettext: (singular, plural, n) => { - const translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated !== "undefined") - return translated[Documentation.PLURAL_EXPR(n)]; - return n === 1 ? singular : plural; - }, - - addTranslations: (catalog) => { - Object.assign(Documentation.TRANSLATIONS, catalog.messages); - Documentation.PLURAL_EXPR = new Function( - "n", - `return (${catalog.plural_expr})` - ); - Documentation.LOCALE = catalog.locale; - }, - - /** - * helper function to focus on search bar - */ - focusSearchBar: () => { - document.querySelectorAll("input[name=q]")[0]?.focus(); - }, - - /** - * Initialise the domain index toggle buttons - */ - initDomainIndexTable: () => { - const toggler = (el) => { - const idNumber = el.id.substr(7); - const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); - if (el.src.substr(-9) === "minus.png") { - el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; - toggledRows.forEach((el) => (el.style.display = "none")); - } else { - el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; - toggledRows.forEach((el) => (el.style.display = "")); - } - }; - - const togglerElements = document.querySelectorAll("img.toggler"); - togglerElements.forEach((el) => - el.addEventListener("click", (event) => toggler(event.currentTarget)) - ); - togglerElements.forEach((el) => (el.style.display = "")); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); - }, - - initOnKeyListeners: () => { - // only install a listener if it is really needed - if ( - !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && - !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS - ) - return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.altKey || event.ctrlKey || event.metaKey) return; - - if (!event.shiftKey) { - switch (event.key) { - case "ArrowLeft": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const prevLink = document.querySelector('link[rel="prev"]'); - if (prevLink && prevLink.href) { - window.location.href = prevLink.href; - event.preventDefault(); - } - break; - case "ArrowRight": - if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; - - const nextLink = document.querySelector('link[rel="next"]'); - if (nextLink && nextLink.href) { - window.location.href = nextLink.href; - event.preventDefault(); - } - break; - } - } - - // some keyboard layouts may need Shift to get / - switch (event.key) { - case "/": - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; - Documentation.focusSearchBar(); - event.preventDefault(); - } - }); - }, -}; - -// quick alias for translations -const _ = Documentation.gettext; - -_ready(Documentation.init); diff --git 
a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js deleted file mode 100644 index d22d62d..0000000 --- a/docs/_build/html/_static/documentation_options.js +++ /dev/null @@ -1,14 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '26.05.2024', - LANGUAGE: 'english', - COLLAPSE_INDEX: false, - BUILDER: 'html', - FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false, - SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: true, -}; \ No newline at end of file diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png deleted file mode 100644 index a858a41..0000000 Binary files a/docs/_build/html/_static/file.png and /dev/null differ diff --git a/docs/_build/html/_static/jquery.js b/docs/_build/html/_static/jquery.js deleted file mode 100644 index c4c6022..0000000 --- a/docs/_build/html/_static/jquery.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" 
":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return 
a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return 
se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/html/_static/js/html5shiv.min.js b/docs/_build/html/_static/js/html5shiv.min.js deleted file mode 100644 index cd1c674..0000000 --- a/docs/_build/html/_static/js/html5shiv.min.js +++ /dev/null @@ -1,4 +0,0 @@ -/** -* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed -*/ -!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/docs/_build/html/_static/js/theme.js b/docs/_build/html/_static/js/theme.js deleted file mode 100644 index 1fddb6e..0000000 --- a/docs/_build/html/_static/js/theme.js +++ /dev/null @@ -1 +0,0 @@ -!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 - var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 - var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 - var s_v = "^(" + C + ")?" 
+ v; // vowel in stem - - this.stemWord = function (w) { - var stem; - var suffix; - var firstch; - var origword = w; - - if (w.length < 3) - return w; - - var re; - var re2; - var re3; - var re4; - - firstch = w.substr(0,1); - if (firstch == "y") - w = firstch.toUpperCase() + w.substr(1); - - // Step 1a - re = /^(.+?)(ss|i)es$/; - re2 = /^(.+?)([^s])s$/; - - if (re.test(w)) - w = w.replace(re,"$1$2"); - else if (re2.test(w)) - w = w.replace(re2,"$1$2"); - - // Step 1b - re = /^(.+?)eed$/; - re2 = /^(.+?)(ed|ing)$/; - if (re.test(w)) { - var fp = re.exec(w); - re = new RegExp(mgr0); - if (re.test(fp[1])) { - re = /.$/; - w = w.replace(re,""); - } - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1]; - re2 = new RegExp(s_v); - if (re2.test(stem)) { - w = stem; - re2 = /(at|bl|iz)$/; - re3 = new RegExp("([^aeiouylsz])\\1$"); - re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re2.test(w)) - w = w + "e"; - else if (re3.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - else if (re4.test(w)) - w = w + "e"; - } - } - - // Step 1c - re = /^(.+?)y$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(s_v); - if (re.test(stem)) - w = stem + "i"; - } - - // Step 2 - re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step2list[suffix]; - } - - // Step 3 - re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - suffix = fp[2]; - re = new RegExp(mgr0); - if (re.test(stem)) - w = stem + step3list[suffix]; - } - - // Step 4 - re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; - re2 = /^(.+?)(s|t)(ion)$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - if (re.test(stem)) - w = stem; - } - else if (re2.test(w)) { - var fp = re2.exec(w); - stem = fp[1] + fp[2]; - re2 = new RegExp(mgr1); - if (re2.test(stem)) - w = stem; - } - - // Step 5 - re = /^(.+?)e$/; - if (re.test(w)) { - var fp = re.exec(w); - stem = fp[1]; - re = new RegExp(mgr1); - re2 = new RegExp(meq1); - re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); - if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) - w = stem; - } - re = /ll$/; - re2 = new RegExp(mgr1); - if (re.test(w) && re2.test(w)) { - re = /.$/; - w = w.replace(re,""); - } - - // and turn initial Y back to y - if (firstch == "y") - w = firstch.toLowerCase() + w.substr(1); - return w; - } -} - diff --git a/docs/_build/html/_static/minus.png b/docs/_build/html/_static/minus.png deleted file mode 100644 index d96755f..0000000 Binary files a/docs/_build/html/_static/minus.png and /dev/null differ diff --git a/docs/_build/html/_static/nbsphinx-broken-thumbnail.svg b/docs/_build/html/_static/nbsphinx-broken-thumbnail.svg deleted file mode 100644 index 4919ca8..0000000 --- a/docs/_build/html/_static/nbsphinx-broken-thumbnail.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/docs/_build/html/_static/nbsphinx-code-cells.css b/docs/_build/html/_static/nbsphinx-code-cells.css deleted file mode 100644 index a3fb27c..0000000 --- a/docs/_build/html/_static/nbsphinx-code-cells.css +++ /dev/null @@ -1,259 +0,0 @@ -/* remove conflicting styling from Sphinx themes */ -div.nbinput.container div.prompt *, -div.nboutput.container div.prompt *, -div.nbinput.container div.input_area 
pre, -div.nboutput.container div.output_area pre, -div.nbinput.container div.input_area .highlight, -div.nboutput.container div.output_area .highlight { - border: none; - padding: 0; - margin: 0; - box-shadow: none; -} - -div.nbinput.container > div[class*=highlight], -div.nboutput.container > div[class*=highlight] { - margin: 0; -} - -div.nbinput.container div.prompt *, -div.nboutput.container div.prompt * { - background: none; -} - -div.nboutput.container div.output_area .highlight, -div.nboutput.container div.output_area pre { - background: unset; -} - -div.nboutput.container div.output_area div.highlight { - color: unset; /* override Pygments text color */ -} - -/* avoid gaps between output lines */ -div.nboutput.container div[class*=highlight] pre { - line-height: normal; -} - -/* input/output containers */ -div.nbinput.container, -div.nboutput.container { - display: -webkit-flex; - display: flex; - align-items: flex-start; - margin: 0; - width: 100%; -} -@media (max-width: 540px) { - div.nbinput.container, - div.nboutput.container { - flex-direction: column; - } -} - -/* input container */ -div.nbinput.container { - padding-top: 5px; -} - -/* last container */ -div.nblast.container { - padding-bottom: 5px; -} - -/* input prompt */ -div.nbinput.container div.prompt pre, -/* for sphinx_immaterial theme: */ -div.nbinput.container div.prompt pre > code { - color: #307FC1; -} - -/* output prompt */ -div.nboutput.container div.prompt pre, -/* for sphinx_immaterial theme: */ -div.nboutput.container div.prompt pre > code { - color: #BF5B3D; -} - -/* all prompts */ -div.nbinput.container div.prompt, -div.nboutput.container div.prompt { - width: 4.5ex; - padding-top: 5px; - position: relative; - user-select: none; -} - -div.nbinput.container div.prompt > div, -div.nboutput.container div.prompt > div { - position: absolute; - right: 0; - margin-right: 0.3ex; -} - -@media (max-width: 540px) { - div.nbinput.container div.prompt, - div.nboutput.container div.prompt { - width: unset; - text-align: left; - padding: 0.4em; - } - div.nboutput.container div.prompt.empty { - padding: 0; - } - - div.nbinput.container div.prompt > div, - div.nboutput.container div.prompt > div { - position: unset; - } -} - -/* disable scrollbars and line breaks on prompts */ -div.nbinput.container div.prompt pre, -div.nboutput.container div.prompt pre { - overflow: hidden; - white-space: pre; -} - -/* input/output area */ -div.nbinput.container div.input_area, -div.nboutput.container div.output_area { - -webkit-flex: 1; - flex: 1; - overflow: auto; -} -@media (max-width: 540px) { - div.nbinput.container div.input_area, - div.nboutput.container div.output_area { - width: 100%; - } -} - -/* input area */ -div.nbinput.container div.input_area { - border: 1px solid #e0e0e0; - border-radius: 2px; - /*background: #f5f5f5;*/ -} - -/* override MathJax center alignment in output cells */ -div.nboutput.container div[class*=MathJax] { - text-align: left !important; -} - -/* override sphinx.ext.imgmath center alignment in output cells */ -div.nboutput.container div.math p { - text-align: left; -} - -/* standard error */ -div.nboutput.container div.output_area.stderr { - background: #fdd; -} - -/* ANSI colors */ -.ansi-black-fg { color: #3E424D; } -.ansi-black-bg { background-color: #3E424D; } -.ansi-black-intense-fg { color: #282C36; } -.ansi-black-intense-bg { background-color: #282C36; } -.ansi-red-fg { color: #E75C58; } -.ansi-red-bg { background-color: #E75C58; } -.ansi-red-intense-fg { color: #B22B31; } -.ansi-red-intense-bg { 
background-color: #B22B31; } -.ansi-green-fg { color: #00A250; } -.ansi-green-bg { background-color: #00A250; } -.ansi-green-intense-fg { color: #007427; } -.ansi-green-intense-bg { background-color: #007427; } -.ansi-yellow-fg { color: #DDB62B; } -.ansi-yellow-bg { background-color: #DDB62B; } -.ansi-yellow-intense-fg { color: #B27D12; } -.ansi-yellow-intense-bg { background-color: #B27D12; } -.ansi-blue-fg { color: #208FFB; } -.ansi-blue-bg { background-color: #208FFB; } -.ansi-blue-intense-fg { color: #0065CA; } -.ansi-blue-intense-bg { background-color: #0065CA; } -.ansi-magenta-fg { color: #D160C4; } -.ansi-magenta-bg { background-color: #D160C4; } -.ansi-magenta-intense-fg { color: #A03196; } -.ansi-magenta-intense-bg { background-color: #A03196; } -.ansi-cyan-fg { color: #60C6C8; } -.ansi-cyan-bg { background-color: #60C6C8; } -.ansi-cyan-intense-fg { color: #258F8F; } -.ansi-cyan-intense-bg { background-color: #258F8F; } -.ansi-white-fg { color: #C5C1B4; } -.ansi-white-bg { background-color: #C5C1B4; } -.ansi-white-intense-fg { color: #A1A6B2; } -.ansi-white-intense-bg { background-color: #A1A6B2; } - -.ansi-default-inverse-fg { color: #FFFFFF; } -.ansi-default-inverse-bg { background-color: #000000; } - -.ansi-bold { font-weight: bold; } -.ansi-underline { text-decoration: underline; } - - -div.nbinput.container div.input_area div[class*=highlight] > pre, -div.nboutput.container div.output_area div[class*=highlight] > pre, -div.nboutput.container div.output_area div[class*=highlight].math, -div.nboutput.container div.output_area.rendered_html, -div.nboutput.container div.output_area > div.output_javascript, -div.nboutput.container div.output_area:not(.rendered_html) > img{ - padding: 5px; - margin: 0; -} - -/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ -div.nbinput.container div.input_area > div[class^='highlight'], -div.nboutput.container div.output_area > div[class^='highlight']{ - overflow-y: hidden; -} - -/* hide copy button on prompts for 'sphinx_copybutton' extension ... */ -.prompt .copybtn, -/* ... 
and 'sphinx_immaterial' theme */ -.prompt .md-clipboard.md-icon { - display: none; -} - -/* Some additional styling taken form the Jupyter notebook CSS */ -.jp-RenderedHTMLCommon table, -div.rendered_html table { - border: none; - border-collapse: collapse; - border-spacing: 0; - color: black; - font-size: 12px; - table-layout: fixed; -} -.jp-RenderedHTMLCommon thead, -div.rendered_html thead { - border-bottom: 1px solid black; - vertical-align: bottom; -} -.jp-RenderedHTMLCommon tr, -.jp-RenderedHTMLCommon th, -.jp-RenderedHTMLCommon td, -div.rendered_html tr, -div.rendered_html th, -div.rendered_html td { - text-align: right; - vertical-align: middle; - padding: 0.5em 0.5em; - line-height: normal; - white-space: normal; - max-width: none; - border: none; -} -.jp-RenderedHTMLCommon th, -div.rendered_html th { - font-weight: bold; -} -.jp-RenderedHTMLCommon tbody tr:nth-child(odd), -div.rendered_html tbody tr:nth-child(odd) { - background: #f5f5f5; -} -.jp-RenderedHTMLCommon tbody tr:hover, -div.rendered_html tbody tr:hover { - background: rgba(66, 165, 245, 0.2); -} - diff --git a/docs/_build/html/_static/nbsphinx-gallery.css b/docs/_build/html/_static/nbsphinx-gallery.css deleted file mode 100644 index 365c27a..0000000 --- a/docs/_build/html/_static/nbsphinx-gallery.css +++ /dev/null @@ -1,31 +0,0 @@ -.nbsphinx-gallery { - display: grid; - grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); - gap: 5px; - margin-top: 1em; - margin-bottom: 1em; -} - -.nbsphinx-gallery > a { - padding: 5px; - border: 1px dotted currentColor; - border-radius: 2px; - text-align: center; -} - -.nbsphinx-gallery > a:hover { - border-style: solid; -} - -.nbsphinx-gallery img { - max-width: 100%; - max-height: 100%; -} - -.nbsphinx-gallery > a > div:first-child { - display: flex; - align-items: start; - justify-content: center; - height: 120px; - margin-bottom: 5px; -} diff --git a/docs/_build/html/_static/nbsphinx-no-thumbnail.svg b/docs/_build/html/_static/nbsphinx-no-thumbnail.svg deleted file mode 100644 index 9dca758..0000000 --- a/docs/_build/html/_static/nbsphinx-no-thumbnail.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - - diff --git a/docs/_build/html/_static/plus.png b/docs/_build/html/_static/plus.png deleted file mode 100644 index 7107cec..0000000 Binary files a/docs/_build/html/_static/plus.png and /dev/null differ diff --git a/docs/_build/html/_static/pygments.css b/docs/_build/html/_static/pygments.css deleted file mode 100644 index 84ab303..0000000 --- a/docs/_build/html/_static/pygments.css +++ /dev/null @@ -1,75 +0,0 @@ -pre { line-height: 125%; } -td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } -td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } -.highlight .hll { background-color: #ffffcc } -.highlight { background: #f8f8f8; } -.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ -.highlight .err { border: 1px solid #FF0000 } /* Error */ -.highlight .k { color: #008000; font-weight: bold } /* Keyword */ -.highlight .o { color: #666666 } /* Operator */ -.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ -.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ -.highlight .cp { color: #9C6500 } /* 
Comment.Preproc */ -.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ -.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ -.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ -.highlight .gd { color: #A00000 } /* Generic.Deleted */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ -.highlight .gr { color: #E40000 } /* Generic.Error */ -.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ -.highlight .gi { color: #008400 } /* Generic.Inserted */ -.highlight .go { color: #717171 } /* Generic.Output */ -.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ -.highlight .gt { color: #0044DD } /* Generic.Traceback */ -.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ -.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ -.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ -.highlight .kp { color: #008000 } /* Keyword.Pseudo */ -.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ -.highlight .kt { color: #B00040 } /* Keyword.Type */ -.highlight .m { color: #666666 } /* Literal.Number */ -.highlight .s { color: #BA2121 } /* Literal.String */ -.highlight .na { color: #687822 } /* Name.Attribute */ -.highlight .nb { color: #008000 } /* Name.Builtin */ -.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ -.highlight .no { color: #880000 } /* Name.Constant */ -.highlight .nd { color: #AA22FF } /* Name.Decorator */ -.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ -.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ -.highlight .nf { color: #0000FF } /* Name.Function */ -.highlight .nl { color: #767600 } /* Name.Label */ -.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ -.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ -.highlight .nv { color: #19177C } /* Name.Variable */ -.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ -.highlight .w { color: #bbbbbb } /* Text.Whitespace */ -.highlight .mb { color: #666666 } /* Literal.Number.Bin */ -.highlight .mf { color: #666666 } /* Literal.Number.Float */ -.highlight .mh { color: #666666 } /* Literal.Number.Hex */ -.highlight .mi { color: #666666 } /* Literal.Number.Integer */ -.highlight .mo { color: #666666 } /* Literal.Number.Oct */ -.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ -.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ -.highlight .sc { color: #BA2121 } /* Literal.String.Char */ -.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ -.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ -.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ -.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ -.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ -.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ -.highlight .sx { color: #008000 } /* Literal.String.Other */ -.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ -.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ -.highlight .ss { color: #19177C } /* 
Literal.String.Symbol */ -.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ -.highlight .fm { color: #0000FF } /* Name.Function.Magic */ -.highlight .vc { color: #19177C } /* Name.Variable.Class */ -.highlight .vg { color: #19177C } /* Name.Variable.Global */ -.highlight .vi { color: #19177C } /* Name.Variable.Instance */ -.highlight .vm { color: #19177C } /* Name.Variable.Magic */ -.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/_build/html/_static/searchtools.js b/docs/_build/html/_static/searchtools.js deleted file mode 100644 index 97d56a7..0000000 --- a/docs/_build/html/_static/searchtools.js +++ /dev/null @@ -1,566 +0,0 @@ -/* - * searchtools.js - * ~~~~~~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for the full-text search. - * - * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ -"use strict"; - -/** - * Simple result scoring code. - */ -if (typeof Scorer === "undefined") { - var Scorer = { - // Implement the following function to further tweak the score for each result - // The function takes a result array [docname, title, anchor, descr, score, filename] - // and returns the new score. - /* - score: result => { - const [docname, title, anchor, descr, score, filename] = result - return score - }, - */ - - // query matches the full name of an object - objNameMatch: 11, - // or matches in the last dotted part of the object name - objPartialMatch: 6, - // Additive scores depending on the priority of the object - objPrio: { - 0: 15, // used to be importantResults - 1: 5, // used to be objectResults - 2: -5, // used to be unimportantResults - }, - // Used when the priority is not in the mapping. - objPrioDefault: 0, - - // query found in title - title: 15, - partialTitle: 7, - // query found in terms - term: 5, - partialTerm: 2, - }; -} - -const _removeChildren = (element) => { - while (element && element.lastChild) element.removeChild(element.lastChild); -}; - -/** - * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping - */ -const _escapeRegExp = (string) => - string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string - -const _displayItem = (item, searchTerms) => { - const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; - const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; - const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; - const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; - const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; - - const [docName, title, anchor, descr, score, _filename] = item; - - let listItem = document.createElement("li"); - let requestUrl; - let linkUrl; - if (docBuilder === "dirhtml") { - // dirhtml builder - let dirname = docName + "/"; - if (dirname.match(/\/index\/$/)) - dirname = dirname.substring(0, dirname.length - 6); - else if (dirname === "index/") dirname = ""; - requestUrl = docUrlRoot + dirname; - linkUrl = requestUrl; - } else { - // normal html builders - requestUrl = docUrlRoot + docName + docFileSuffix; - linkUrl = docName + docLinkSuffix; - } - let linkEl = listItem.appendChild(document.createElement("a")); - linkEl.href = linkUrl + anchor; - linkEl.dataset.score = score; - linkEl.innerHTML = title; - if (descr) - listItem.appendChild(document.createElement("span")).innerHTML = - " (" + descr + ")"; - else if (showSearchSummary) - fetch(requestUrl) - .then((responseData) => responseData.text()) - 
.then((data) => { - if (data) - listItem.appendChild( - Search.makeSearchSummary(data, searchTerms) - ); - }); - Search.output.appendChild(listItem); -}; -const _finishSearch = (resultCount) => { - Search.stopPulse(); - Search.title.innerText = _("Search Results"); - if (!resultCount) - Search.status.innerText = Documentation.gettext( - "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." - ); - else - Search.status.innerText = _( - `Search finished, found ${resultCount} page(s) matching the search query.` - ); -}; -const _displayNextItem = ( - results, - resultCount, - searchTerms -) => { - // results left, load the summary and display it - // this is intended to be dynamic (don't sub resultsCount) - if (results.length) { - _displayItem(results.pop(), searchTerms); - setTimeout( - () => _displayNextItem(results, resultCount, searchTerms), - 5 - ); - } - // search finished, update title and status message - else _finishSearch(resultCount); -}; - -/** - * Default splitQuery function. Can be overridden in ``sphinx.search`` with a - * custom function per language. - * - * The regular expression works by splitting the string on consecutive characters - * that are not Unicode letters, numbers, underscores, or emoji characters. - * This is the same as ``\W+`` in Python, preserving the surrogate pair area. - */ -if (typeof splitQuery === "undefined") { - var splitQuery = (query) => query - .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) - .filter(term => term) // remove remaining empty strings -} - -/** - * Search Module - */ -const Search = { - _index: null, - _queued_query: null, - _pulse_status: -1, - - htmlToText: (htmlString) => { - const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); - const docContent = htmlElement.querySelector('[role="main"]'); - if (docContent !== undefined) return docContent.textContent; - console.warn( - "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." 
- ); - return ""; - }, - - init: () => { - const query = new URLSearchParams(window.location.search).get("q"); - document - .querySelectorAll('input[name="q"]') - .forEach((el) => (el.value = query)); - if (query) Search.performSearch(query); - }, - - loadIndex: (url) => - (document.body.appendChild(document.createElement("script")).src = url), - - setIndex: (index) => { - Search._index = index; - if (Search._queued_query !== null) { - const query = Search._queued_query; - Search._queued_query = null; - Search.query(query); - } - }, - - hasIndex: () => Search._index !== null, - - deferQuery: (query) => (Search._queued_query = query), - - stopPulse: () => (Search._pulse_status = -1), - - startPulse: () => { - if (Search._pulse_status >= 0) return; - - const pulse = () => { - Search._pulse_status = (Search._pulse_status + 1) % 4; - Search.dots.innerText = ".".repeat(Search._pulse_status); - if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); - }; - pulse(); - }, - - /** - * perform a search for something (or wait until index is loaded) - */ - performSearch: (query) => { - // create the required interface elements - const searchText = document.createElement("h2"); - searchText.textContent = _("Searching"); - const searchSummary = document.createElement("p"); - searchSummary.classList.add("search-summary"); - searchSummary.innerText = ""; - const searchList = document.createElement("ul"); - searchList.classList.add("search"); - - const out = document.getElementById("search-results"); - Search.title = out.appendChild(searchText); - Search.dots = Search.title.appendChild(document.createElement("span")); - Search.status = out.appendChild(searchSummary); - Search.output = out.appendChild(searchList); - - const searchProgress = document.getElementById("search-progress"); - // Some themes don't use the search progress node - if (searchProgress) { - searchProgress.innerText = _("Preparing search..."); - } - Search.startPulse(); - - // index already loaded, the browser was quick! 
- if (Search.hasIndex()) Search.query(query); - else Search.deferQuery(query); - }, - - /** - * execute search (requires search index to be loaded) - */ - query: (query) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - const allTitles = Search._index.alltitles; - const indexEntries = Search._index.indexentries; - - // stem the search terms and add them to the correct list - const stemmer = new Stemmer(); - const searchTerms = new Set(); - const excludedTerms = new Set(); - const highlightTerms = new Set(); - const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); - splitQuery(query.trim()).forEach((queryTerm) => { - const queryTermLower = queryTerm.toLowerCase(); - - // maybe skip this "word" - // stopwords array is from language_data.js - if ( - stopwords.indexOf(queryTermLower) !== -1 || - queryTerm.match(/^\d+$/) - ) - return; - - // stem the word - let word = stemmer.stemWord(queryTermLower); - // select the correct list - if (word[0] === "-") excludedTerms.add(word.substr(1)); - else { - searchTerms.add(word); - highlightTerms.add(queryTermLower); - } - }); - - if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js - localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) - } - - // console.debug("SEARCH: searching for:"); - // console.info("required: ", [...searchTerms]); - // console.info("excluded: ", [...excludedTerms]); - - // array of [docname, title, anchor, descr, score, filename] - let results = []; - _removeChildren(document.getElementById("search-progress")); - - const queryLower = query.toLowerCase(); - for (const [title, foundTitles] of Object.entries(allTitles)) { - if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { - for (const [file, id] of foundTitles) { - let score = Math.round(100 * queryLower.length / title.length) - results.push([ - docNames[file], - titles[file] !== title ? `${titles[file]} > ${title}` : title, - id !== null ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // search for explicit entries in index directives - for (const [entry, foundEntries] of Object.entries(indexEntries)) { - if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { - for (const [file, id] of foundEntries) { - let score = Math.round(100 * queryLower.length / entry.length) - results.push([ - docNames[file], - titles[file], - id ? "#" + id : "", - null, - score, - filenames[file], - ]); - } - } - } - - // lookup as object - objectTerms.forEach((term) => - results.push(...Search.performObjectSearch(term, objectTerms)) - ); - - // lookup as search terms in fulltext - results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); - - // let the scorer override scores with a custom scoring function - if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); - - // now sort the results by score (in opposite order of appearance, since the - // display function below uses pop() to retrieve items) and then - // alphabetically - results.sort((a, b) => { - const leftScore = a[4]; - const rightScore = b[4]; - if (leftScore === rightScore) { - // same score: sort alphabetically - const leftTitle = a[1].toLowerCase(); - const rightTitle = b[1].toLowerCase(); - if (leftTitle === rightTitle) return 0; - return leftTitle > rightTitle ? -1 : 1; // inverted is intentional - } - return leftScore > rightScore ? 
1 : -1; - }); - - // remove duplicate search results - // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept - let seen = new Set(); - results = results.reverse().reduce((acc, result) => { - let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); - if (!seen.has(resultStr)) { - acc.push(result); - seen.add(resultStr); - } - return acc; - }, []); - - results = results.reverse(); - - // for debugging - //Search.lastresults = results.slice(); // a copy - // console.info("search results:", Search.lastresults); - - // print the results - _displayNextItem(results, results.length, searchTerms); - }, - - /** - * search for object names - */ - performObjectSearch: (object, objectTerms) => { - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const objects = Search._index.objects; - const objNames = Search._index.objnames; - const titles = Search._index.titles; - - const results = []; - - const objectSearchCallback = (prefix, match) => { - const name = match[4] - const fullname = (prefix ? prefix + "." : "") + name; - const fullnameLower = fullname.toLowerCase(); - if (fullnameLower.indexOf(object) < 0) return; - - let score = 0; - const parts = fullnameLower.split("."); - - // check for different match types: exact matches of full name or - // "last name" (i.e. last dotted part) - if (fullnameLower === object || parts.slice(-1)[0] === object) - score += Scorer.objNameMatch; - else if (parts.slice(-1)[0].indexOf(object) > -1) - score += Scorer.objPartialMatch; // matches in last name - - const objName = objNames[match[1]][2]; - const title = titles[match[0]]; - - // If more than one term searched for, we require other words to be - // found in the name/title/description - const otherTerms = new Set(objectTerms); - otherTerms.delete(object); - if (otherTerms.size > 0) { - const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); - if ( - [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) - ) - return; - } - - let anchor = match[3]; - if (anchor === "") anchor = fullname; - else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; - - const descr = objName + _(", in ") + title; - - // add custom score for some objects according to scorer - if (Scorer.objPrio.hasOwnProperty(match[2])) - score += Scorer.objPrio[match[2]]; - else score += Scorer.objPrioDefault; - - results.push([ - docNames[match[0]], - fullname, - "#" + anchor, - descr, - score, - filenames[match[0]], - ]); - }; - Object.keys(objects).forEach((prefix) => - objects[prefix].forEach((array) => - objectSearchCallback(prefix, array) - ) - ); - return results; - }, - - /** - * search for full-text terms in the index - */ - performTermsSearch: (searchTerms, excludedTerms) => { - // prepare search - const terms = Search._index.terms; - const titleTerms = Search._index.titleterms; - const filenames = Search._index.filenames; - const docNames = Search._index.docnames; - const titles = Search._index.titles; - - const scoreMap = new Map(); - const fileMap = new Map(); - - // perform the search on the required terms - searchTerms.forEach((word) => { - const files = []; - const arr = [ - { files: terms[word], score: Scorer.term }, - { files: titleTerms[word], score: Scorer.title }, - ]; - // add support for partial matches - if (word.length > 2) { - const escapedWord = _escapeRegExp(word); - Object.keys(terms).forEach((term) => { - if (term.match(escapedWord) && !terms[word]) - arr.push({ 
files: terms[term], score: Scorer.partialTerm }); - }); - Object.keys(titleTerms).forEach((term) => { - if (term.match(escapedWord) && !titleTerms[word]) - arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); - }); - } - - // no match but word was a required one - if (arr.every((record) => record.files === undefined)) return; - - // found search word in contents - arr.forEach((record) => { - if (record.files === undefined) return; - - let recordFiles = record.files; - if (recordFiles.length === undefined) recordFiles = [recordFiles]; - files.push(...recordFiles); - - // set score for the word in each file - recordFiles.forEach((file) => { - if (!scoreMap.has(file)) scoreMap.set(file, {}); - scoreMap.get(file)[word] = record.score; - }); - }); - - // create the mapping - files.forEach((file) => { - if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) - fileMap.get(file).push(word); - else fileMap.set(file, [word]); - }); - }); - - // now check if the files don't contain excluded terms - const results = []; - for (const [file, wordList] of fileMap) { - // check if all requirements are matched - - // as search terms with length < 3 are discarded - const filteredTermCount = [...searchTerms].filter( - (term) => term.length > 2 - ).length; - if ( - wordList.length !== searchTerms.size && - wordList.length !== filteredTermCount - ) - continue; - - // ensure that none of the excluded terms is in the search result - if ( - [...excludedTerms].some( - (term) => - terms[term] === file || - titleTerms[term] === file || - (terms[term] || []).includes(file) || - (titleTerms[term] || []).includes(file) - ) - ) - break; - - // select one (max) score for the file. - const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); - // add result to the result list - results.push([ - docNames[file], - titles[file], - "", - null, - score, - filenames[file], - ]); - } - return results; - }, - - /** - * helper function to return a node containing the - * search summary for a given text. keywords is a list - * of stemmed words. - */ - makeSearchSummary: (htmlText, keywords) => { - const text = Search.htmlToText(htmlText); - if (text === "") return null; - - const textLower = text.toLowerCase(); - const actualStartPosition = [...keywords] - .map((k) => textLower.indexOf(k.toLowerCase())) - .filter((i) => i > -1) - .slice(-1)[0]; - const startWithContext = Math.max(actualStartPosition - 120, 0); - - const top = startWithContext === 0 ? "" : "..."; - const tail = startWithContext + 240 < text.length ? "..." : ""; - - let summary = document.createElement("p"); - summary.classList.add("context"); - summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; - - return summary; - }, -}; - -_ready(Search.init); diff --git a/docs/_build/html/_static/sphinx_highlight.js b/docs/_build/html/_static/sphinx_highlight.js deleted file mode 100644 index aae669d..0000000 --- a/docs/_build/html/_static/sphinx_highlight.js +++ /dev/null @@ -1,144 +0,0 @@ -/* Highlighting utilities for Sphinx HTML documentation. */ -"use strict"; - -const SPHINX_HIGHLIGHT_ENABLED = true - -/** - * highlight a given string on a node by wrapping it in - * span elements with the given class name. 
- */ -const _highlight = (node, addItems, text, className) => { - if (node.nodeType === Node.TEXT_NODE) { - const val = node.nodeValue; - const parent = node.parentNode; - const pos = val.toLowerCase().indexOf(text); - if ( - pos >= 0 && - !parent.classList.contains(className) && - !parent.classList.contains("nohighlight") - ) { - let span; - - const closestNode = parent.closest("body, svg, foreignObject"); - const isInSVG = closestNode && closestNode.matches("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.classList.add(className); - } - - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - parent.insertBefore( - span, - parent.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling - ) - ); - node.nodeValue = val.substr(0, pos); - - if (isInSVG) { - const rect = document.createElementNS( - "http://www.w3.org/2000/svg", - "rect" - ); - const bbox = parent.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute("class", className); - addItems.push({ parent: parent, target: rect }); - } - } - } else if (node.matches && !node.matches("button, select, textarea")) { - node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); - } -}; -const _highlightText = (thisNode, text, className) => { - let addItems = []; - _highlight(thisNode, addItems, text, className); - addItems.forEach((obj) => - obj.parent.insertAdjacentElement("beforebegin", obj.target) - ); -}; - -/** - * Small JavaScript module for the documentation. - */ -const SphinxHighlight = { - - /** - * highlight the search words provided in localstorage in the text - */ - highlightSearchWords: () => { - if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight - - // get and clear terms from localstorage - const url = new URL(window.location); - const highlight = - localStorage.getItem("sphinx_highlight_terms") - || url.searchParams.get("highlight") - || ""; - localStorage.removeItem("sphinx_highlight_terms") - url.searchParams.delete("highlight"); - window.history.replaceState({}, "", url); - - // get individual terms from highlight string - const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); - if (terms.length === 0) return; // nothing to do - - // There should never be more than one element matching "div.body" - const divBody = document.querySelectorAll("div.body"); - const body = divBody.length ? 
divBody[0] : document.querySelector("body"); - window.setTimeout(() => { - terms.forEach((term) => _highlightText(body, term, "highlighted")); - }, 10); - - const searchBox = document.getElementById("searchbox"); - if (searchBox === null) return; - searchBox.appendChild( - document - .createRange() - .createContextualFragment( - '" - ) - ); - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords: () => { - document - .querySelectorAll("#searchbox .highlight-link") - .forEach((el) => el.remove()); - document - .querySelectorAll("span.highlighted") - .forEach((el) => el.classList.remove("highlighted")); - localStorage.removeItem("sphinx_highlight_terms") - }, - - initEscapeListener: () => { - // only install a listener if it is really needed - if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; - - document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; - if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { - SphinxHighlight.hideSearchWords(); - event.preventDefault(); - } - }); - }, -}; - -_ready(SphinxHighlight.highlightSearchWords); -_ready(SphinxHighlight.initEscapeListener); diff --git a/docs/_build/html/_static/togglebutton.css b/docs/_build/html/_static/togglebutton.css deleted file mode 100644 index 54a6787..0000000 --- a/docs/_build/html/_static/togglebutton.css +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Admonition-based toggles - */ - -/* Visibility of the target */ -.admonition.toggle .admonition-title ~ * { - transition: opacity .3s, height .3s; -} - -/* Toggle buttons inside admonitions so we see the title */ -.admonition.toggle { - position: relative; -} - -/* Titles should cut off earlier to avoid overlapping w/ button */ -.admonition.toggle .admonition-title { - padding-right: 25%; - cursor: pointer; -} - -/* Hovering will cause a slight shift in color to make it feel interactive */ -.admonition.toggle .admonition-title:hover { - box-shadow: inset 0 0 0px 20px rgb(0 0 0 / 1%); -} - -/* Hovering will cause a slight shift in color to make it feel interactive */ -.admonition.toggle .admonition-title:active { - box-shadow: inset 0 0 0px 20px rgb(0 0 0 / 3%); -} - -/* Remove extra whitespace below the admonition title when hidden */ -.admonition.toggle-hidden { - padding-bottom: 0; -} - -.admonition.toggle-hidden .admonition-title { - margin-bottom: 0; -} - -/* hides all the content of a page until de-toggled */ -.admonition.toggle-hidden .admonition-title ~ * { - height: 0; - margin: 0; - opacity: 0; - visibility: hidden; -} - -/* General button style and position*/ -button.toggle-button { - /** - * Background and shape. 
By default there's no background - * but users can style as they wish - */ - background: none; - border: none; - outline: none; - - /* Positioning just inside the admonition title */ - position: absolute; - right: 0.5em; - padding: 0px; - border: none; - outline: none; -} - -/* Display the toggle hint on wide screens */ -@media (min-width: 768px) { - button.toggle-button.toggle-button-hidden:before { - content: attr(data-toggle-hint); /* This will be filled in by JS */ - font-size: .8em; - align-self: center; - } -} - -/* Icon behavior */ -.tb-icon { - transition: transform .2s ease-out; - height: 1.5em; - width: 1.5em; - stroke: currentColor; /* So that we inherit the color of other text */ -} - -/* The icon should point right when closed, down when open. */ -/* Open */ -.admonition.toggle button .tb-icon { - transform: rotate(90deg); -} - -/* Closed */ -.admonition.toggle button.toggle-button-hidden .tb-icon { - transform: rotate(0deg); -} - -/* With details toggles, we don't rotate the icon so it points right */ -details.toggle-details .tb-icon { - height: 1.4em; - width: 1.4em; - margin-top: 0.1em; /* To center the button vertically */ -} - - -/** - * Details-based toggles. - * In this case, we wrap elements with `.toggle` in a details block. - */ - -/* Details blocks */ -details.toggle-details { - margin: 1em 0; -} - - -details.toggle-details summary { - display: flex; - align-items: center; - cursor: pointer; - list-style: none; - border-radius: .2em; - border-left: 3px solid #1976d2; - background-color: rgb(204 204 204 / 10%); - padding: 0.2em 0.7em 0.3em 0.5em; /* Less padding on left because the SVG has left margin */ - font-size: 0.9em; -} - -details.toggle-details summary:hover { - background-color: rgb(204 204 204 / 20%); -} - -details.toggle-details summary:active { - background: rgb(204 204 204 / 28%); -} - -.toggle-details__summary-text { - margin-left: 0.2em; -} - -details.toggle-details[open] summary { - margin-bottom: .5em; -} - -details.toggle-details[open] summary .tb-icon { - transform: rotate(90deg); -} - -details.toggle-details[open] summary ~ * { - animation: toggle-fade-in .3s ease-out; -} - -@keyframes toggle-fade-in { - from {opacity: 0%;} - to {opacity: 100%;} -} - -/* Print rules - we hide all toggle button elements at print */ -@media print { - /* Always hide the summary so the button doesn't show up */ - details.toggle-details summary { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/html/_static/togglebutton.js b/docs/_build/html/_static/togglebutton.js deleted file mode 100644 index 215a7ee..0000000 --- a/docs/_build/html/_static/togglebutton.js +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Add Toggle Buttons to elements - */ - -let toggleChevron = ` - - - -`; - -var initToggleItems = () => { - var itemsToToggle = document.querySelectorAll(togglebuttonSelector); - console.log(`[togglebutton]: Adding toggle buttons to ${itemsToToggle.length} items`) - // Add the button to each admonition and hook up a callback to toggle visibility - itemsToToggle.forEach((item, index) => { - if (item.classList.contains("admonition")) { - // If it's an admonition block, then we'll add a button inside - // Generate unique IDs for this item - var toggleID = `toggle-${index}`; - var buttonID = `button-${toggleID}`; - - item.setAttribute('id', toggleID); - if (!item.classList.contains("toggle")){ - item.classList.add("toggle"); - } - // This is the button that will be added to each item to trigger the toggle - var collapseButton = ` - `; - - title = 
item.querySelector(".admonition-title") - title.insertAdjacentHTML("beforeend", collapseButton); - thisButton = document.getElementById(buttonID); - - // Add click handlers for the button + admonition title (if admonition) - admonitionTitle = document.querySelector(`#${toggleID} > .admonition-title`) - if (admonitionTitle) { - // If an admonition, then make the whole title block clickable - admonitionTitle.addEventListener('click', toggleClickHandler); - admonitionTitle.dataset.target = toggleID - admonitionTitle.dataset.button = buttonID - } else { - // If not an admonition then we'll listen for the button click - thisButton.addEventListener('click', toggleClickHandler); - } - - // Now hide the item for this toggle button unless explicitly noted to show - if (!item.classList.contains("toggle-shown")) { - toggleHidden(thisButton); - } - } else { - // If not an admonition, wrap the block in a
block - // Define the structure of the details block and insert it as a sibling - var detailsBlock = ` -
- - ${toggleChevron} - ${toggleHintShow} - -
`; - item.insertAdjacentHTML("beforebegin", detailsBlock); - - // Now move the toggle-able content inside of the details block - details = item.previousElementSibling - details.appendChild(item) - item.classList.add("toggle-details__container") - - // Set up a click trigger to change the text as needed - details.addEventListener('click', (click) => { - let parent = click.target.parentElement; - if (parent.tagName.toLowerCase() == "details") { - summary = parent.querySelector("summary"); - details = parent; - } else { - summary = parent; - details = parent.parentElement; - } - // Update the inner text for the proper hint - if (details.open) { - summary.querySelector("span.toggle-details__summary-text").innerText = toggleHintShow; - } else { - summary.querySelector("span.toggle-details__summary-text").innerText = toggleHintHide; - } - - }); - - // If we have a toggle-shown class, open details block should be open - if (item.classList.contains("toggle-shown")) { - details.click(); - } - } - }) -}; - -// This should simply add / remove the collapsed class and change the button text -var toggleHidden = (button) => { - target = button.dataset['target'] - var itemToToggle = document.getElementById(target); - if (itemToToggle.classList.contains("toggle-hidden")) { - itemToToggle.classList.remove("toggle-hidden"); - button.classList.remove("toggle-button-hidden"); - } else { - itemToToggle.classList.add("toggle-hidden"); - button.classList.add("toggle-button-hidden"); - } -} - -var toggleClickHandler = (click) => { - // Be cause the admonition title is clickable and extends to the whole admonition - // We only look for a click event on this title to trigger the toggle. - - if (click.target.classList.contains("admonition-title")) { - button = click.target.querySelector(".toggle-button"); - } else if (click.target.classList.contains("tb-icon")) { - // We've clicked the icon and need to search up one parent for the button - button = click.target.parentElement; - } else if (click.target.tagName == "polyline") { - // We've clicked the SVG elements inside the button, need to up 2 layers - button = click.target.parentElement.parentElement; - } else if (click.target.classList.contains("toggle-button")) { - // We've clicked the button itself and so don't need to do anything - button = click.target; - } else { - console.log(`[togglebutton]: Couldn't find button for ${click.target}`) - } - target = document.getElementById(button.dataset['button']); - toggleHidden(target); -} - -// If we want to blanket-add toggle classes to certain cells -var addToggleToSelector = () => { - const selector = ""; - if (selector.length > 0) { - document.querySelectorAll(selector).forEach((item) => { - item.classList.add("toggle"); - }) - } -} - -// Helper function to run when the DOM is finished -const sphinxToggleRunWhenDOMLoaded = cb => { - if (document.readyState != 'loading') { - cb() - } else if (document.addEventListener) { - document.addEventListener('DOMContentLoaded', cb) - } else { - document.attachEvent('onreadystatechange', function() { - if (document.readyState == 'complete') cb() - }) - } -} -sphinxToggleRunWhenDOMLoaded(addToggleToSelector) -sphinxToggleRunWhenDOMLoaded(initToggleItems) - -/** Toggle details blocks to be open when printing */ -if (toggleOpenOnPrint == "true") { - window.addEventListener("beforeprint", () => { - // Open the details - document.querySelectorAll("details.toggle-details").forEach((el) => { - el.dataset["togglestatus"] = el.open; - el.open = true; - }); - - // Open the admonitions - 
document.querySelectorAll(".admonition.toggle.toggle-hidden").forEach((el) => { - console.log(el); - el.querySelector("button.toggle-button").click(); - el.dataset["toggle_after_print"] = "true"; - }); - }); - window.addEventListener("afterprint", () => { - // Re-close the details that were closed - document.querySelectorAll("details.toggle-details").forEach((el) => { - el.open = el.dataset["togglestatus"] == "true"; - delete el.dataset["togglestatus"]; - }); - - // Re-close the admonition toggle buttons - document.querySelectorAll(".admonition.toggle").forEach((el) => { - if (el.dataset["toggle_after_print"] == "true") { - el.querySelector("button.toggle-button").click(); - delete el.dataset["toggle_after_print"]; - } - }); - }); -} diff --git a/docs/_build/html/search.html b/docs/_build/html/search.html deleted file mode 100644 index 9a58958..0000000 --- a/docs/_build/html/search.html +++ /dev/null @@ -1,138 +0,0 @@ - - - - - - Search — mambular 26.05.2024 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

© Copyright 2024, Christoph Weisser.

-
-  Built with Sphinx using a theme provided by Read the Docs.
-
- - - - - - - - - \ No newline at end of file diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js deleted file mode 100644 index 11fa3c4..0000000 --- a/docs/_build/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({"docnames": ["api/base_models/BaseModels", "api/base_models/index", "api/models/Models", "api/models/index", "index", "installation", "mambular", "quickstart"], "filenames": ["api\\base_models\\BaseModels.rst", "api\\base_models\\index.rst", "api\\models\\Models.rst", "api\\models\\index.rst", "index.rst", "installation.md", "mambular.md", "quickstart.md"], "titles": ["Base Models", "BaseModels", "Models", "Models", "Mambular: Tabular Deep Learning with Mamba Architectures", "Installation", "Mambular: Tabular Deep Learning with Mamba Architectures", "Quickstart"], "terms": {"class": [0, 1, 2, 3], "mambular": [0, 1, 2, 3, 5], "base_model": 0, "classifi": [0, 2], "basemambularclassifi": [0, 1, 2], "num_class": 0, "config": [0, 2], "cat_feature_info": 0, "none": [0, 2], "num_feature_info": 0, "lr": [0, 4, 6], "0": [0, 4, 6], "001": 0, "lr_patienc": 0, "10": [0, 4, 6], "weight_decai": 0, "025": 0, "lr_factor": 0, "75": 0, "sourc": [0, 2, 5], "A": [0, 2], "build": 0, "classif": [0, 1, 3, 4, 6, 7], "us": [0, 2], "architectur": [0, 2], "within": [0, 4, 6], "pytorch": [0, 2, 4, 6], "lightn": [0, 2, 4, 6], "framework": [0, 4, 6], "thi": [0, 1, 2, 3, 4, 5, 6], "integr": [0, 2, 4, 6], "variou": [0, 1, 2, 3, 4, 6], "compon": [0, 2, 4, 6], "embed": [0, 7], "categor": [0, 2, 4, 6], "numer": [0, 2, 4, 6], "featur": [0, 2], "process": [0, 4, 6], "sequenc": [0, 1, 3], "head": 0, "predict": [0, 2, 4, 6], "It": [0, 2], "support": [0, 4, 6], "multi": [0, 1, 3], "binari": [0, 1, 3], "task": [0, 1, 3, 4, 6], "paramet": [0, 2, 4, 6], "int": 0, "The": [0, 2, 4, 6], "number": [0, 2, 4, 6], "For": [0, 4, 6], "should": 0, "2": [0, 4, 6], "mambularconfig": [0, 2], "an": [0, 2, 4, 6], "instanc": [0, 4, 6], "contain": [0, 2], "configur": [0, 2, 4, 6], "dict": 0, "option": [0, 2, 4, 6], "dictionari": 0, "map": 0, "name": 0, "uniqu": [0, 4, 6], "categori": [0, 4, 6], "inform": [0, 4, 6], "i": [0, 2, 4, 5, 6], "layer": 0, "default": 0, "size": [0, 4, 6], "input": [0, 2, 4, 6], "dimens": 0, "float": 0, "learn": [0, 2, 3], "rate": 0, "optim": [0, 4, 6], "1e": [0, 4, 6], "03": [0, 4, 6], "epoch": 0, "improv": 0, "after": [0, 2], "which": [0, 2], "reduc": 0, "weight": 0, "decai": 0, "l2": 0, "penalti": 0, "factor": 0, "embedding_activ": 0, "activ": 0, "function": [0, 1, 3], "appli": [0, 4, 6], "linear": 0, "transform": 0, "type": [0, 1, 2, 3], "nn": [0, 2], "modul": [0, 1, 2, 3, 4, 6], "num_embed": 0, "list": [0, 2], "sequenti": [0, 4, 6], "each": [0, 4, 6], "correspond": [0, 4, 6], "modulelist": 0, "cat_embed": 0, "mamba": [0, 5], "norm_f": 0, "normal": [0, 2, 4, 6], "tabular_head": 0, "label": 0, "from": [0, 4, 5, 6], "aggreg": 0, "represent": 0, "pooling_method": 0, "method": [0, 2, 4, 6], "across": 0, "ar": [0, 2, 4, 6], "avg": 0, "max": 0, "sum": [0, 4, 6], "str": 0, "loss_fct": 0, "loss": [0, 4, 6], "train": [0, 2, 4, 6], "acc": 0, "metric": 0, "comput": 0, "accuraci": 0, "torchmetr": 0, "auroc": 0, "area": 0, "under": [0, 4, 6], "receiv": [0, 4, 6], "oper": 0, "characterist": [0, 4, 6], "curv": [0, 4, 6], "precis": [0, 4, 6], "forward": 0, "cat_featur": 0, "num_featur": 0, "defin": [0, 4, 6], "pass": [0, 2], "both": [0, 2, 4, 6], "produc": 0, "training_step": 0, "batch": 0, "batch_idx": 0, "perform": [0, 4, 6], "singl": [0, 4, 6], "step": [0, 2, 4, 5, 6], "log": 0, 
"set": [0, 2, 4, 6], "validation_step": 0, "valid": [0, 2, 4, 6], "configure_optim": 0, "": [0, 2, 3, 4, 6], "schedul": 0, "attribut": 0, "automatic_optim": 0, "If": 0, "fals": 0, "you": [0, 4, 5, 6], "respons": [0, 2, 4, 6], "call": [0, 2], "backward": 0, "zero_grad": 0, "current_epoch": 0, "current": 0, "trainer": 0, "attach": 0, "devic": 0, "dtype": 0, "example_input_arrai": 0, "exampl": [0, 7], "arrai": [0, 4, 6], "specif": [0, 2, 4, 6], "what": [0, 4, 6], "can": [0, 2, 4, 6], "consum": 0, "fabric": 0, "global_rank": 0, "index": 0, "all": [0, 4, 6], "node": 0, "global_step": 0, "total": 0, "seen": 0, "hparam": 0, "collect": 0, "hyperparamet": [0, 4, 6], "save": 0, "save_hyperparamet": 0, "hparams_initi": 0, "local_rank": 0, "logger": 0, "refer": 0, "object": [0, 2], "on_gpu": 0, "return": 0, "true": 0, "locat": [0, 4, 6], "gpu": 0, "strict_load": 0, "determin": 0, "how": 0, "load": [0, 2], "load_state_dict": 0, "strict": 0, "__call__": 0, "up": 0, "provid": [0, 1, 2, 3, 4, 6], "lr_schedul": 0, "tensor": 0, "output": 0, "dure": [0, 2], "tupl": 0, "data": [0, 1, 2, 3], "dataload": 0, "distribut": [0, 1, 3, 7], "basemambularlss": [0, 1], "famili": [0, 1, 3, 4, 6], "distribution_param": 0, "likelihood": [0, 4, 6], "statist": [0, 1, 3, 4, 6], "lss": 0, "built": [0, 4, 6], "tabular": [0, 2, 5], "design": [0, 2, 4, 6], "accommod": 0, "differ": [0, 1, 3, 4, 6], "regress": [0, 1, 2, 3, 7], "includ": [0, 2, 4, 6], "poisson": [0, 4, 6], "gamma": [0, 4, 6], "beta": [0, 4, 6], "dirichlet": [0, 4, 6], "studentt": [0, 4, 6], "negativebinom": [0, 4, 6], "inversegamma": [0, 4, 6], "initi": [0, 2, 4, 6], "coeffici": 0, "addit": [0, 2], "chosen": 0, "core": 0, "neural": [0, 2], "network": [0, 2], "implement": [0, 2, 4, 6, 7], "block": 0, "final": 0, "deriv": 0, "callabl": 0, "typic": 0, "embedding_classifi": 0, "baseembeddingmambularclassifi": 0, "seq_siz": 0, "20": 0, "raw_embed": 0, "special": [0, 1, 3], "protein": [0, 1, 3], "raw": 0, "make": [0, 2, 4, 5, 6], "suitabl": [0, 4, 6], "complex": [0, 1, 3, 4, 6], "about": 0, "regular": 0, "chunk": 0, "relev": 0, "when": [0, 2, 4, 6], "bool": 0, "indic": 0, "whether": 0, "directli": 0, "them": [0, 4, 6], "target": 0, "depend": 0, "embedding_regressor": 0, "baseembeddingmambularregressor": [0, 1], "mseloss": 0, "regressor": [0, 2], "basemambularregressor": [0, 1, 2], "incorpor": [0, 2], "train_ms": 0, "mean": [0, 4, 6], "squar": 0, "error": 0, "meansquarederror": 0, "val_ms": 0, "torch": [0, 2], "base": [1, 2], "model": [1, 7], "descript": [1, 3], "embeddingmambularclassifi": [1, 2, 3], "sklearn_classifi": 2, "mambularclassifi": [2, 3, 4, 6], "kwarg": 2, "mimic": 2, "scikit": [2, 3, 4, 6], "api": [2, 4, 6], "custom": [2, 4, 6], "work": [2, 4, 6], "flexibl": 2, "interfac": [2, 3, 4, 6], "specifi": 2, "preprocess": 2, "smoothli": 2, "util": 2, "cross": [2, 4, 6], "grid": 2, "search": 2, "accept": 2, "ani": [2, 4, 6], "keyword": 2, "argument": 2, "preprocessor": 2, "known": [2, 4, 6], "extract": 2, "predefin": 2, "rest": 2, "hold": 2, "handl": 2, "like": [2, 4, 6], "encod": [2, 4, 6], "underli": [2, 4, 6], "instanti": 2, "upon": 2, "fit": 2, "sklearn_distribut": 2, "mambularlss": [2, 3], "machin": [2, 4, 6], "estim": 2, "structur": 2, "deep": 2, "facilit": 2, "end": 2, "workflow": [2, 4, 6], "separ": 2, "allow": [2, 4, 6], "adjust": 2, "arbitrari": 2, "divid": 2, "recogn": 2, "kei": 2, "d_model": [2, 4, 6], "n_layer": [2, 4, 6], "etc": 2, "assum": 2, "sklearn_embedding_classifi": 2, "proteinmambularclassifi": 2, "compat": [2, 4, 6], "encapsul": 2, "offer": [2, 
4, 6], "probabl": [2, 4, 6], "manner": 2, "akin": 2, "unrecogn": 2, "store": 2, "sklearn_embedding_regressor": 2, "embeddingmambularregressor": [2, 3], "sklearn": [2, 4, 6], "proteinmambularregressor": 2, "wrap": 2, "get": 2, "wai": 2, "sklearn_regressor": 2, "mambularregressor": [2, 3, 4, 6], "follow": [2, 5], "convent": 2, "straightforward": 2, "seamlessli": [2, 4, 6], "tool": [2, 4, 6], "constructor": 2, "remain": 2, "variabl": [2, 4, 6], "scale": [2, 4, 6], "adher": [3, 4, 6], "baseestim": [3, 4, 6], "python": [4, 6], "packag": [4, 5, 6], "bring": [4, 6], "power": [4, 6], "suit": [4, 6], "eas": [4, 6], "mind": [4, 6], "highli": [4, 6], "familiar": [4, 6], "ecosystem": [4, 6], "just": [4, 6], "would": [4, 6], "tradit": [4, 6], "ad": [4, 6], "comprehens": [4, 6], "cater": [4, 6], "wide": [4, 6], "rang": [4, 6], "state": [4, 6], "art": [4, 6], "leverag": [4, 6], "its": [4, 6], "effect": [4, 6], "time": [4, 6], "seri": [4, 6], "space": [4, 6], "adapt": [4, 6], "here": [4, 6], "seamless": [4, 6], "effortlessli": [4, 6], "easi": [4, 6], "inclus": [4, 6], "exist": [4, 6], "pipelin": [4, 6], "tune": [4, 6], "extens": [4, 6], "come": [4, 6], "broad": [4, 6], "techniqu": [4, 6], "ensur": [4, 6], "your": [4, 6], "prepar": [4, 6], "predict_proba": [4, 6], "minim": [4, 6], "those": [4, 6], "alreadi": [4, 6], "accustom": [4, 6], "hood": [4, 6], "top": [4, 6], "benefit": [4, 6], "streamlin": [4, 6], "advanc": [4, 6], "16": [4, 6], "bit": [4, 6], "find": [4, 6], "pip": [4, 5, 6], "elev": [4, 6], "stage": [4, 6], "develop": [4, 6], "emploi": [4, 6], "sophist": [4, 6], "best": [4, 6], "shape": [4, 6], "our": [4, 6], "intuit": [4, 6], "effici": [4, 6], "automat": [4, 6], "identifi": [4, 6], "dataset": [4, 6], "most": [4, 6], "ordin": [4, 6], "preserv": [4, 6], "inher": [4, 6], "order": [4, 6], "readi": [4, 6], "One": [4, 6], "hot": [4, 6], "nomin": [4, 6], "one": [4, 6], "captur": [4, 6], "presenc": [4, 6], "absenc": [4, 6], "without": [4, 6], "impos": [4, 6], "bin": [4, 6], "discret": [4, 6], "continu": [4, 6], "certain": [4, 6], "context": [4, 6], "decis": [4, 6], "tree": [4, 6], "strategi": [4, 6], "enhanc": [4, 6], "interpret": [4, 6], "easili": [4, 6], "turn": [4, 6], "standard": [4, 6], "per": [4, 6], "possibl": [4, 6], "similarli": [4, 6], "instead": [4, 6], "gracefulli": [4, 6], "imput": [4, 6], "mode": [4, 6], "ones": [4, 6], "complet": [4, 6], "manual": [4, 6], "intervent": [4, 6], "while": [4, 6], "excel": [4, 6], "autom": [4, 6], "also": [4, 6], "need": [4, 6], "re": [4, 6], "lock": [4, 6], "approach": [4, 6], "By": [4, 6], "commit": [4, 6], "qualiti": [4, 6], "apart": [4, 6], "indispens": [4, 6], "arsen": [4, 6], "simpl": [4, 6], "thu": [4, 6], "addition": [4, 6], "other": [4, 6], "import": [4, 6], "dropout": [4, 6], "01": [4, 6], "128": [4, 6], "6": [4, 6], "numerical_preprocess": [4, 6], "x": [4, 6], "datafram": [4, 6], "someth": [4, 6], "pd": [4, 6], "np": [4, 6], "y": [4, 6], "max_epoch": [4, 6], "500": [4, 6], "patienc": [4, 6], "25": [4, 6], "obtain": [4, 6], "pred": [4, 6], "introduc": [4, 6], "cut": [4, 6], "edg": [4, 6], "through": [4, 6], "empow": [4, 6], "user": [4, 6], "full": [4, 6], "particularli": [4, 6], "valuabl": [4, 6], "scenario": [4, 6], "where": [4, 5, 6], "understand": [4, 6], "skew": [4, 6], "kurtosi": [4, 6], "crucial": [4, 6], "central": [4, 6], "tendenc": [4, 6], "unlik": [4, 6], "e": [4, 6], "g": [4, 6], "entir": [4, 6], "more": [4, 6], "quantil": [4, 6], "varianc": [4, 6], "higher": [4, 6], "moment": [4, 6], "varieti": [4, 6], "gaussian": [4, 6], "binomi": [4, 
6], "count": [4, 6], "nuanc": [4, 6], "uncertainti": [4, 6], "richer": [4, 6], "enabl": [4, 6], "robust": [4, 6], "uncertain": [4, 6], "environ": [4, 6], "symmetr": [4, 6], "around": [4, 6], "repres": [4, 6], "event": [4, 6], "occur": [4, 6], "fix": [4, 6], "interv": [4, 6], "bound": [4, 6], "zero": [4, 6], "often": [4, 6], "wait": [4, 6], "between": [4, 6], "1": [4, 6], "proport": [4, 6], "percentag": [4, 6], "multivari": [4, 6], "individu": [4, 6], "correl": [4, 6], "constrain": [4, 6], "student": [4, 6], "t": [4, 6], "heavier": [4, 6], "tail": [4, 6], "than": [4, 6], "sampl": [4, 6], "small": [4, 6], "neg": [4, 6], "over": [4, 6], "dispers": [4, 6], "rel": [4, 6], "invers": [4, 6], "prior": [4, 6], "bayesian": [4, 6], "infer": [4, 6], "two": [4, 6], "These": [4, 6], "flexibli": [4, 6], "risk": [4, 6], "assess": [4, 6], "In": [4, 6], "financ": [4, 6], "insur": [4, 6], "potenti": [4, 6], "averag": [4, 6], "outcom": [4, 6], "demand": [4, 6], "forecast": [4, 6], "inventori": [4, 6], "manag": [4, 6], "product": [4, 6], "help": [4, 6], "stock": [4, 6], "level": [4, 6], "person": [4, 6], "medicin": [4, 6], "healthcar": [4, 6], "patient": [4, 6], "treatment": [4, 6], "aid": [4, 6], "therapi": [4, 6], "plan": [4, 6], "To": [4, 6], "desir": [4, 6], "similar": [4, 6], "256": [4, 6], "4": [4, 6], "300": [4, 6], "pleas": 5, "below": 5, "cd": 5, "note": 5, "sure": 5, "same": 5, "directori": 5, "setup": 5, "py": 5, "file": 5, "resid": 5, "so": 5, "far": 5, "avail": 5, "pypi": 5, "code": 7}, "objects": {"mambular.base_models.classifier": [[0, 0, 1, "", "BaseMambularClassifier"]], "mambular.base_models.classifier.BaseMambularClassifier": [[0, 1, 1, "", "acc"], [0, 1, 1, "", "auroc"], [0, 1, 1, "", "cat_embeddings"], [0, 2, 1, "id0", "configure_optimizers"], [0, 1, 1, "", "embedding_activation"], [0, 2, 1, "id1", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "num_embeddings"], [0, 1, 1, "", "pooling_method"], [0, 1, 1, "", "precision"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id2", "training_step"], [0, 2, 1, "id3", "validation_step"]], "mambular.base_models.distributional": [[0, 0, 1, "", "BaseMambularLSS"]], "mambular.base_models.distributional.BaseMambularLSS": [[0, 2, 1, "id4", "configure_optimizers"], [0, 2, 1, "id5", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id6", "training_step"], [0, 2, 1, "id7", "validation_step"]], "mambular.base_models.embedding_classifier": [[0, 0, 1, "", "BaseEmbeddingMambularClassifier"]], "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier": [[0, 2, 1, "id8", "configure_optimizers"], [0, 2, 1, "id9", "forward"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id10", "training_step"], [0, 2, 1, "id11", "validation_step"]], "mambular.base_models.embedding_regressor": [[0, 0, 1, "", "BaseEmbeddingMambularRegressor"]], "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor": [[0, 2, 1, "id12", "configure_optimizers"], [0, 2, 1, "id13", "forward"], [0, 1, 1, "", "loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 2, 1, "id14", "training_step"], [0, 2, 1, "id15", "validation_step"]], "mambular.base_models.regressor": [[0, 0, 1, "", "BaseMambularRegressor"]], "mambular.base_models.regressor.BaseMambularRegressor": [[0, 2, 1, "id16", "configure_optimizers"], [0, 2, 1, "id17", "forward"], [0, 1, 1, "", 
"loss_fct"], [0, 1, 1, "", "mamba"], [0, 1, 1, "", "norm_f"], [0, 1, 1, "", "tabular_head"], [0, 1, 1, "", "train_mse"], [0, 2, 1, "id18", "training_step"], [0, 1, 1, "", "val_mse"], [0, 2, 1, "id19", "validation_step"]], "mambular.models.sklearn_classifier": [[2, 0, 1, "", "MambularClassifier"]], "mambular.models.sklearn_classifier.MambularClassifier": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_distributional": [[2, 0, 1, "", "MambularLSS"]], "mambular.models.sklearn_distributional.MambularLSS": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_embedding_classifier": [[2, 0, 1, "", "EmbeddingMambularClassifier"]], "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_embedding_regressor": [[2, 0, 1, "", "EmbeddingMambularRegressor"]], "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]], "mambular.models.sklearn_regressor": [[2, 0, 1, "", "MambularRegressor"]], "mambular.models.sklearn_regressor.MambularRegressor": [[2, 1, 1, "", "config"], [2, 1, 1, "", "model"], [2, 1, 1, "", "preprocessor"]]}, "objtypes": {"0": "py:class", "1": "py:attribute", "2": "py:method"}, "objnames": {"0": ["py", "class", "Python class"], "1": ["py", "attribute", "Python attribute"], "2": ["py", "method", "Python method"]}, "titleterms": {"base": 0, "model": [0, 2, 3, 4, 6], "basemodel": 1, "mambular": [4, 6], "tabular": [4, 6], "deep": [4, 6], "learn": [4, 6], "mamba": [4, 6], "architectur": [4, 6], "featur": [4, 6], "document": [4, 6], "instal": [4, 5, 6], "preprocess": [4, 6], "data": [4, 6], "type": [4, 6], "detect": [4, 6], "transform": [4, 6], "handl": [4, 6], "miss": [4, 6], "valu": [4, 6], "flexibl": [4, 6], "customiz": [4, 6], "fit": [4, 6], "distribut": [4, 6], "regress": [4, 6], "mambularlss": [4, 6], "kei": [4, 6], "avail": [4, 6], "class": [4, 6], "us": [4, 6], "case": [4, 6], "get": [4, 6], "start": [4, 6], "quickstart": 7}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 8, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.viewcode": 1, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "nbsphinx": 4, "sphinx": 57}, "alltitles": {"Base Models": [[0, "base-models"]], "BaseModels": [[1, "basemodels"]], "Models": [[2, "models"], [3, "models"]], "Mambular: Tabular Deep Learning with Mamba Architectures": [[4, "mambular-tabular-deep-learning-with-mamba-architectures"], [6, "mambular-tabular-deep-learning-with-mamba-architectures"]], "Features": [[4, "features"], [6, "features"]], "Documentation": [[4, "documentation"], [6, "documentation"]], "Installation": [[4, "installation"], [5, "installation"], [6, "installation"]], "Preprocessing": [[4, "preprocessing"], [6, "preprocessing"]], "Data Type Detection and Transformation": [[4, "data-type-detection-and-transformation"], [6, "data-type-detection-and-transformation"]], "Handling Missing Values": [[4, "handling-missing-values"], [6, "handling-missing-values"]], "Flexible and Customizable": [[4, "flexible-and-customizable"], [6, "flexible-and-customizable"]], "Fit a Model": [[4, "fit-a-model"], [6, "fit-a-model"]], 
"Distributional Regression with MambularLSS": [[4, "distributional-regression-with-mambularlss"], [6, "distributional-regression-with-mambularlss"]], "Key Features of MambularLSS:": [[4, "key-features-of-mambularlss"], [6, "key-features-of-mambularlss"]], "Available Distribution Classes:": [[4, "available-distribution-classes"], [6, "available-distribution-classes"]], "Use Cases for MambularLSS:": [[4, "use-cases-for-mambularlss"], [6, "use-cases-for-mambularlss"]], "Getting Started with MambularLSS:": [[4, "getting-started-with-mambularlss"], [6, "getting-started-with-mambularlss"]], "Quickstart": [[7, "quickstart"]]}, "indexentries": {"baseembeddingmambularclassifier (class in mambular.base_models.embedding_classifier)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier"]], "baseembeddingmambularregressor (class in mambular.base_models.embedding_regressor)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor"]], "basemambularclassifier (class in mambular.base_models.classifier)": [[0, "mambular.base_models.classifier.BaseMambularClassifier"]], "basemambularlss (class in mambular.base_models.distributional)": [[0, "mambular.base_models.distributional.BaseMambularLSS"]], "basemambularregressor (class in mambular.base_models.regressor)": [[0, "mambular.base_models.regressor.BaseMambularRegressor"]], "acc (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.acc"]], "auroc (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.auroc"]], "cat_embeddings (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.cat_embeddings"]], "configure_optimizers() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id0"], [0, "mambular.base_models.classifier.BaseMambularClassifier.configure_optimizers"]], "configure_optimizers() (mambular.base_models.distributional.basemambularlss method)": [[0, "id4"], [0, "mambular.base_models.distributional.BaseMambularLSS.configure_optimizers"]], "configure_optimizers() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id8"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.configure_optimizers"]], "configure_optimizers() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id12"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.configure_optimizers"]], "configure_optimizers() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id16"], [0, "mambular.base_models.regressor.BaseMambularRegressor.configure_optimizers"]], "embedding_activation (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.embedding_activation"]], "forward() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id1"], [0, "mambular.base_models.classifier.BaseMambularClassifier.forward"]], "forward() (mambular.base_models.distributional.basemambularlss method)": [[0, "id5"], [0, "mambular.base_models.distributional.BaseMambularLSS.forward"]], "forward() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id9"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.forward"]], "forward() 
(mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id13"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.forward"]], "forward() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id17"], [0, "mambular.base_models.regressor.BaseMambularRegressor.forward"]], "loss_fct (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.loss_fct"]], "loss_fct (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.loss_fct"]], "loss_fct (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.loss_fct"]], "loss_fct (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.loss_fct"]], "mamba (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.mamba"]], "mamba (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.mamba"]], "mamba (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.mamba"]], "mamba (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.mamba"]], "mamba (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.mamba"]], "norm_f (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.norm_f"]], "norm_f (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.norm_f"]], "norm_f (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.norm_f"]], "norm_f (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.norm_f"]], "norm_f (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.norm_f"]], "num_embeddings (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.num_embeddings"]], "pooling_method (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.pooling_method"]], "precision (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.precision"]], "tabular_head (mambular.base_models.classifier.basemambularclassifier attribute)": [[0, "mambular.base_models.classifier.BaseMambularClassifier.tabular_head"]], "tabular_head (mambular.base_models.distributional.basemambularlss attribute)": [[0, "mambular.base_models.distributional.BaseMambularLSS.tabular_head"]], "tabular_head (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier attribute)": [[0, 
"mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.tabular_head"]], "tabular_head (mambular.base_models.embedding_regressor.baseembeddingmambularregressor attribute)": [[0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.tabular_head"]], "tabular_head (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.tabular_head"]], "train_mse (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.train_mse"]], "training_step() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id2"], [0, "mambular.base_models.classifier.BaseMambularClassifier.training_step"]], "training_step() (mambular.base_models.distributional.basemambularlss method)": [[0, "id6"], [0, "mambular.base_models.distributional.BaseMambularLSS.training_step"]], "training_step() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id10"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.training_step"]], "training_step() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id14"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.training_step"]], "training_step() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id18"], [0, "mambular.base_models.regressor.BaseMambularRegressor.training_step"]], "val_mse (mambular.base_models.regressor.basemambularregressor attribute)": [[0, "mambular.base_models.regressor.BaseMambularRegressor.val_mse"]], "validation_step() (mambular.base_models.classifier.basemambularclassifier method)": [[0, "id3"], [0, "mambular.base_models.classifier.BaseMambularClassifier.validation_step"]], "validation_step() (mambular.base_models.distributional.basemambularlss method)": [[0, "id7"], [0, "mambular.base_models.distributional.BaseMambularLSS.validation_step"]], "validation_step() (mambular.base_models.embedding_classifier.baseembeddingmambularclassifier method)": [[0, "id11"], [0, "mambular.base_models.embedding_classifier.BaseEmbeddingMambularClassifier.validation_step"]], "validation_step() (mambular.base_models.embedding_regressor.baseembeddingmambularregressor method)": [[0, "id15"], [0, "mambular.base_models.embedding_regressor.BaseEmbeddingMambularRegressor.validation_step"]], "validation_step() (mambular.base_models.regressor.basemambularregressor method)": [[0, "id19"], [0, "mambular.base_models.regressor.BaseMambularRegressor.validation_step"]], "embeddingmambularclassifier (class in mambular.models.sklearn_embedding_classifier)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier"]], "embeddingmambularregressor (class in mambular.models.sklearn_embedding_regressor)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor"]], "mambularclassifier (class in mambular.models.sklearn_classifier)": [[2, "mambular.models.sklearn_classifier.MambularClassifier"]], "mambularlss (class in mambular.models.sklearn_distributional)": [[2, "mambular.models.sklearn_distributional.MambularLSS"]], "mambularregressor (class in mambular.models.sklearn_regressor)": [[2, "mambular.models.sklearn_regressor.MambularRegressor"]], "config (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.config"]], "config 
(mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.config"]], "config (mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.config"]], "config (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.config"]], "config (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.config"]], "model (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.model"]], "model (mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.model"]], "model (mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.model"]], "model (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.model"]], "model (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.model"]], "preprocessor (mambular.models.sklearn_classifier.mambularclassifier attribute)": [[2, "mambular.models.sklearn_classifier.MambularClassifier.preprocessor"]], "preprocessor (mambular.models.sklearn_distributional.mambularlss attribute)": [[2, "mambular.models.sklearn_distributional.MambularLSS.preprocessor"]], "preprocessor (mambular.models.sklearn_embedding_classifier.embeddingmambularclassifier attribute)": [[2, "mambular.models.sklearn_embedding_classifier.EmbeddingMambularClassifier.preprocessor"]], "preprocessor (mambular.models.sklearn_embedding_regressor.embeddingmambularregressor attribute)": [[2, "mambular.models.sklearn_embedding_regressor.EmbeddingMambularRegressor.preprocessor"]], "preprocessor (mambular.models.sklearn_regressor.mambularregressor attribute)": [[2, "mambular.models.sklearn_regressor.MambularRegressor.preprocessor"]]}}) \ No newline at end of file