diff --git a/.isort.cfg b/.isort.cfg index 6d1942f3..862bdfb8 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -4,4 +4,4 @@ include_trailing_comma=True force_grid_wrap=0 combine_as_imports=True line_length=80 -known_third_party=matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext \ No newline at end of file +known_third_party=GPUtil,matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext,tqdm \ No newline at end of file diff --git a/README.md b/README.md index b6b6697f..09be8775 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Snorkel MeTaL uses a new matrix approximation approach to learn the accuracies o This makes it significantly more scalable than our previous approaches. ## References -* **Best Reference: [_Training Complex Models with Multi-Task Weak Supervision_](https://ajratner.github.io/assets/papers/mts-draft.pdf) [Technical Report]** +* **Best Reference: [_Training Complex Models with Multi-Task Weak Supervision_](https://arxiv.org/abs/1810.02840) [Technical Report]** * [Snorkel MeTaL: Weak Supervision for Multi-Task Learning](https://ajratner.github.io/assets/papers/deem-metal-prototype.pdf) [SIGMOD DEEM 2018] * _[Snorkel: Rapid Training Data Creation with Weak Supervision](https://arxiv.org/abs/1711.10160) [VLDB 2018]_ * _[Data Programming: Creating Large Training Sets, Quickly](https://arxiv.org/abs/1605.07723) [NIPS 2016]_ @@ -105,3 +105,9 @@ This will install a few additional tools that help to ensure that any commits or * [flake8](http://flake8.pycqa.org/en/latest/): PEP8 linting After running `make dev` to install the necessary tools, you can run `make check` to see if any changes you've made violate the repo standards and `make fix` to fix any related to isort/black. Fixes for flake8 violations will need to be made manually. 
+ +### GPU Usage +MeTaL supports GPU usage, but does not include this in automatically-run tests; to run these tests, first install the requirements in `tests/gpu/requirements.txt`, then run: +``` +nosetests tests/gpu +``` \ No newline at end of file diff --git a/environment.yml b/environment.yml index 99e757b5..67b14911 100644 --- a/environment.yml +++ b/environment.yml @@ -13,4 +13,5 @@ dependencies: - pandas - pytorch=0.4.1 - runipy - - scipy \ No newline at end of file + - scipy + - tqdm diff --git a/metal/classifier.py b/metal/classifier.py index d5cc196b..ceea47d0 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -5,10 +5,13 @@ import torch import torch.nn as nn import torch.optim as optim +from scipy.sparse import issparse +from torch.utils.data import DataLoader, Dataset, TensorDataset +from tqdm import tqdm from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, recursive_merge_dicts +from metal.utils import Checkpointer, place_on_gpu, recursive_merge_dicts class Classifier(nn.Module): @@ -44,10 +47,15 @@ def __init__(self, k, config): self.multitask = False self.k = k + # Set random seed if self.config["seed"] is None: self.config["seed"] = np.random.randint(1e6) self._set_seed(self.config["seed"]) + # Confirm that cuda is available if config is using CUDA + if self.config["use_cuda"] and not torch.cuda.is_available(): + raise ValueError("use_cuda=True but CUDA not available.") + def _set_seed(self, seed): self.seed = seed if torch.cuda.is_available(): @@ -121,21 +129,25 @@ def _create_checkpointer(self, checkpoint_config): model_class, **checkpoint_config, verbose=self.config["verbose"] ) - def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): + def _train(self, train_data, loss_fn, dev_data=None): """The internal training routine called by train() after initial setup Args: - train_loader: a torch DataLoader of X (data) and Y (labels) for - the train split + 
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of + X (data) and Y (labels) for the train split loss_fn: the loss function to minimize (maps *data -> loss) - X_dev: the dev set model input - Y_dev: the dev set target labels + dev_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of + X (data) and Y (labels) for the dev split - If either of X_dev or Y_dev is not provided, then no checkpointing or + If dev_data is not provided, then no checkpointing or evaluation on the dev set will occur. """ train_config = self.config["train_config"] - evaluate_dev = X_dev is not None and Y_dev is not None + evaluate_dev = dev_data is not None + + # Convert data to DataLoaders + train_loader = self._create_data_loader(train_data) + dev_loader = self._create_data_loader(dev_data) # Set the optimizer optimizer = self._set_optimizer(train_config) @@ -150,13 +162,29 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): train_config["checkpoint_config"] ) + # Moving model to GPU + if self.config["use_cuda"]: + if self.config["verbose"]: + print("Using GPU...") + self.cuda() + # Train the model for epoch in range(train_config["n_epochs"]): epoch_loss = 0.0 - for data in train_loader: + for batch_num, data in tqdm( + enumerate(train_loader), + total=len(train_loader), + disable=train_config["disable_prog_bar"], + ): + + # Moving data to GPU + if self.config["use_cuda"]: + data = place_on_gpu(data) + # Zero the parameter gradients optimizer.zero_grad() + # import pdb; pdb.set_trace() # Forward pass to calculate outputs loss = loss_fn(*data) if torch.isnan(loss): @@ -187,8 +215,12 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): if evaluate_dev and (epoch % train_config["validation_freq"] == 0): val_metric = train_config["validation_metric"] dev_score = self.score( - X_dev, Y_dev, metric=val_metric, verbose=False + dev_loader, + metric=val_metric, + verbose=False, + print_confusion_matrix=False, ) + if train_config["checkpoint"]: 
checkpointer.checkpoint(self, epoch, dev_score) @@ -220,10 +252,42 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): # Print confusion matrix if applicable if self.config["verbose"]: print("Finished Training") - if evaluate_dev and not self.multitask: - Y_p_dev = self.predict(X_dev) - print("Confusion Matrix (Dev)") - confusion_matrix(Y_p_dev, Y_dev, pretty_print=True) + if evaluate_dev: + self.score( + dev_loader, + metric=["accuracy"], + verbose=True, + print_confusion_matrix=True, + ) + + def _create_dataset(self, *data): + """Converts input data to the appropriate Dataset""" + # Make sure data is a tuple of dense tensors + data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data] + return TensorDataset(*data) + + def _create_data_loader(self, data, **kwargs): + """Converts input data into a DataLoader""" + if data is None: + return None + + # Set DataLoader config + # NOTE: Not applicable if data is already a DataLoader + config = { + **self.config["train_config"]["data_loader_config"], + **kwargs, + "pin_memory": self.config["use_cuda"], + } + + # Return data as DataLoader + if isinstance(data, (tuple, list)): + return DataLoader(self._create_dataset(*data), **config) + elif isinstance(data, Dataset): + return DataLoader(data, **config) + elif isinstance(data, DataLoader): + return data + else: + raise ValueError("Input data type not recognized.") def _set_optimizer(self, train_config): optimizer_config = train_config["optimizer_config"] @@ -270,32 +334,34 @@ def _set_scheduler(self, scheduler_config, optimizer): def score( self, - X, - Y, + data, metric=["accuracy"], break_ties="random", verbose=True, + print_confusion_matrix=True, **kwargs, ): """Scores the predictive performance of the Classifier on all tasks Args: - X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in - {1,...,k} + data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): + X: The input for the predict 
method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k} metric: A metric (string) with which to score performance or a list of such metrics break_ties: A tie-breaking policy (see Classifier._break_ties()) verbose: The verbosity for just this score method; it will not - update the class config + update the class config. + print_confusion_matrix: Print confusion matrix Returns: scores: A (float) score or a list of such scores if kwarg metric is a list """ - Y = self._to_numpy(Y) - Y_p = self.predict(X, break_ties=break_ties, **kwargs) + Y_p, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) + # Evaluate on the specified metrics metric_list = metric if isinstance(metric, list) else [metric] scores = [] for metric in metric_list: @@ -304,11 +370,52 @@ def score( if verbose: print(f"{metric.capitalize()}: {score:.3f}") + # Optionally print confusion matrix + if print_confusion_matrix: + confusion_matrix(Y_p, Y, pretty_print=True) + if isinstance(scores, list) and len(scores) == 1: return scores[0] else: return scores + def _get_predictions(self, data, break_ties="random", **kwargs): + """Computes predictions in batch, given a labeled dataset + + Args: + data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k} + break_ties: How to break ties when making predictions + + Returns: + Y_p: A Tensor of predictions + Y: A Tensor of labels + """ + data_loader = self._create_data_loader(data) + Y_p = [] + Y = [] + + # Do batch evaluation by default, getting the predictions and labels + for batch_num, data in enumerate(data_loader): + Xb, Yb = data + Y.append(self._to_numpy(Yb)) + + # Optionally move to GPU + if self.config["use_cuda"]: + Xb = place_on_gpu(Xb) + + # Append predictions and labels from DataLoader + Y_p.append( + self._to_numpy( + self.predict(Xb, break_ties=break_ties, **kwargs) + ) + ) + Y_p = 
np.hstack(Y_p) + Y = np.hstack(Y) + return Y_p, Y + def predict(self, X, break_ties="random", **kwargs): """Predicts hard (int) labels for an input X on all tasks @@ -320,8 +427,7 @@ def predict(self, X, break_ties="random", **kwargs): An n-dim np.ndarray of predictions in {1,...k} """ Y_p = self._to_numpy(self.predict_proba(X, **kwargs)) - Y_ph = self._break_ties(Y_p, break_ties) - return Y_ph.astype(np.int) + return self._break_ties(Y_p, break_ties).astype(np.int) def predict_proba(self, X, **kwargs): """Predicts soft probabilistic labels for an input X on all tasks @@ -363,15 +469,18 @@ def _break_ties(self, Y_s, break_ties="random"): @staticmethod def _to_numpy(Z): - """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray""" + """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray; + also handles converting sparse input to dense.""" if Z is None: return Z + elif issparse(Z): + return Z.toarray() elif isinstance(Z, np.ndarray): return Z elif isinstance(Z, list): return np.array(Z) elif isinstance(Z, torch.Tensor): - return Z.numpy() + return Z.cpu().numpy() else: msg = ( f"Expected None, list, numpy.ndarray or torch.Tensor, " @@ -381,9 +490,12 @@ def _to_numpy(Z): @staticmethod def _to_torch(Z, dtype=None): - """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor""" + """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor; + also handles converting sparse input to dense.""" if Z is None: return None + elif issparse(Z): + Z = torch.from_numpy(Z.toarray()) elif isinstance(Z, torch.Tensor): pass elif isinstance(Z, list): diff --git a/metal/contrib/featurizers/requirements.txt b/metal/contrib/featurizers/requirements.txt index 9accbf14..970e724f 100644 --- a/metal/contrib/featurizers/requirements.txt +++ b/metal/contrib/featurizers/requirements.txt @@ -1,3 +1,3 @@ torchtext==0.2.3 -ntlk -scikit-learn \ No newline at end of file +nltk +scikit-learn diff --git a/metal/end_model/em_defaults.py 
b/metal/end_model/em_defaults.py index 02d8c298..839b5f1b 100644 --- a/metal/end_model/em_defaults.py +++ b/metal/end_model/em_defaults.py @@ -14,12 +14,13 @@ "layer_out_dims": [10, 2], "batchnorm": False, "dropout": 0.0, + # GPU + "use_cuda": False, # TRAINING "train_config": { # Display "print_every": 1, # Print after this many epochs - # GPU - "use_cuda": False, + "disable_prog_bar": False, # Disable progress bar each epoch # Dataloader "data_loader_config": {"batch_size": 32, "num_workers": 1}, # Train Loop diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 033f22b7..c082e889 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -1,7 +1,6 @@ import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader from metal.classifier import Classifier from metal.end_model.em_defaults import em_default_config @@ -13,7 +12,6 @@ class EndModel(Classifier): """A dynamically constructed discriminative classifier - Args: layer_out_dims: a list of integers corresponding to the output sizes of the layers of your network. 
The first element is the dimensionality of the input layer, the last element is the @@ -158,25 +156,32 @@ def _preprocess_Y(self, Y, k): Y = hard_to_soft(Y.long(), k=k) return Y - def _make_data_loader(self, X, Y, data_loader_config): - dataset = MetalDataset(X, self._preprocess_Y(Y, self.k)) - data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) - return data_loader + def _create_dataset(self, *data): + return MetalDataset(*data) def _get_loss_fn(self): - loss_fn = lambda X, Y: self.criteria(self.forward(X), Y) + if self.config["use_cuda"]: + criteria = self.criteria.cuda() + else: + criteria = self.criteria + loss_fn = lambda X, Y: criteria(self.forward(X), Y) return loss_fn - def train(self, X_train, Y_train, X_dev=None, Y_dev=None, **kwargs): + def train(self, train_data, dev_data=None, **kwargs): self.config = recursive_merge_dicts(self.config, kwargs) - train_config = self.config["train_config"] - Y_train = self._to_torch(Y_train, dtype=torch.FloatTensor) - Y_dev = self._to_torch(Y_dev) + # If train_data is provided as a tuple (X, Y), we can make sure Y is in + # the correct format + # NOTE: Better handling for if train_data is Dataset or DataLoader...? 
+ if isinstance(train_data, (tuple, list)): + X, Y = train_data + Y = self._preprocess_Y( + self._to_torch(Y, dtype=torch.FloatTensor), self.k + ) + train_data = (X, Y) - # Make data loaders - loader_config = train_config["data_loader_config"] - train_loader = self._make_data_loader(X_train, Y_train, loader_config) + # Convert input data to data loaders + train_loader = self._create_data_loader(train_data, shuffle=True) # Initialize the model self.reset() @@ -185,7 +190,7 @@ def train(self, X_train, Y_train, X_dev=None, Y_dev=None, **kwargs): loss_fn = self._get_loss_fn() # Execute training procedure - self._train(train_loader, loss_fn, X_dev=X_dev, Y_dev=Y_dev) + self._train(train_loader, loss_fn, dev_data=dev_data) def predict_proba(self, X): """Returns a [n, k] tensor of soft (float) predictions.""" diff --git a/metal/end_model/loss.py b/metal/end_model/loss.py index d5787520..11be27d6 100644 --- a/metal/end_model/loss.py +++ b/metal/end_model/loss.py @@ -26,9 +26,10 @@ def __init__(self, weight=None, reduction="elementwise_mean"): def forward(self, input, target): n, k = input.shape - cum_losses = torch.zeros(n) + # Note that t.new_zeros, t.new_full put tensor on same device as t + cum_losses = input.new_zeros(n) for y in range(k): - cls_idx = torch.full((n,), y, dtype=torch.long) + cls_idx = input.new_full((n,), y, dtype=torch.long) y_loss = F.cross_entropy(input, cls_idx, reduction="none") if self.weight is not None: y_loss = y_loss * self.weight[y] diff --git a/metal/label_model/baselines.py b/metal/label_model/baselines.py index 5a27c37f..ddb378cb 100644 --- a/metal/label_model/baselines.py +++ b/metal/label_model/baselines.py @@ -62,7 +62,7 @@ def train(self, *args, **kwargs): pass def predict_proba(self, L): - L = np.array(L.todense()).astype(int) + L = self._to_numpy(L).astype(int) n, m = L.shape Y_p = np.zeros((n, self.k)) for i in range(n): diff --git a/metal/label_model/lm_defaults.py b/metal/label_model/lm_defaults.py index 213b4267..0fe184e1 100644 
--- a/metal/label_model/lm_defaults.py +++ b/metal/label_model/lm_defaults.py @@ -3,8 +3,12 @@ "seed": None, "verbose": True, "show_plots": True, + # GPU + "use_cuda": False, # TRAIN "train_config": { + # Dataloader + "data_loader_config": {"batch_size": 1000, "num_workers": 1}, # Classifier # Class balance (if learn_class_balance=False, fix to class_balance) "learn_class_balance": False, @@ -26,5 +30,6 @@ # Train loop "n_epochs": 100, "print_every": 10, + "disable_prog_bar": True, # Disable progress bar each epoch }, } diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index b9a5654b..25a0f7d2 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -1,8 +1,8 @@ - import numpy as np from metal.classifier import Classifier from metal.metrics import metric_score +from metal.multitask import MultiXYDataset, MultiYDataset class MTClassifier(Classifier): @@ -42,19 +42,20 @@ def __init__(self, K, config): def score( self, - X, - Y, + data, metric="accuracy", reduce="mean", break_ties="random", verbose=True, + print_confusion_matrix=False, **kwargs, ): """Scores the predictive performance of the Classifier on all tasks Args: - X: The input for the predict method - Y: A t-length list of [n] or [n, 1] np.ndarrays or torch.Tensors of - gold labels in {1,...,K_t} + data: either a Pytorch Dataset, DataLoader or tuple supplying (X,Y): + X: The input for the predict method + Y: A t-length list of [n] or [n, 1] np.ndarrays or + torch.Tensors of gold labels in {1,...,K_t} metric: The metric with which to score performance on each task reduce: How to reduce the scores of multiple tasks: None : return a t-length list of scores @@ -64,11 +65,13 @@ def score( scores: A (float) score or a t-length list of such scores if reduce=None """ - self._check(Y, typ=list) - Y = [self._to_numpy(Y_t) for Y_t in Y] + Y_p, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) - Y_p = self.predict(X, break_ties=break_ties, 
**kwargs) - self._check(Y_p, typ=list) + # TODO: Handle multiple metrics... + metric_list = metric if isinstance(metric, list) else [metric] + if len(metric_list) > 1: + raise NotImplementedError("Multiple metrics for multi-task.") + metric = metric_list[0] task_scores = [] for t, Y_tp in enumerate(Y_p): @@ -169,6 +172,13 @@ def predict_task_proba(self, X, t=0, **kwargs): """ return self.predict_proba(X, **kwargs)[t] + def _create_dataset(self, *data): + X, Y = data + if isinstance(X, list): + return MultiXYDataset(X, Y) + else: + return MultiYDataset(X, Y) + @staticmethod def _to_torch(Z, dtype=None): """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor""" @@ -176,3 +186,11 @@ def _to_torch(Z, dtype=None): return [Classifier._to_torch(z, dtype=dtype) for z in Z] else: return Classifier._to_torch(Z) + + @staticmethod + def _to_numpy(Z): + """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray""" + if isinstance(Z, list): + return [Classifier._to_numpy(z) for z in Z] + else: + return Classifier._to_numpy(Z) diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index a0156b5d..0e00a1b1 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -4,18 +4,12 @@ import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader from metal.end_model import EndModel from metal.end_model.em_defaults import em_default_config from metal.end_model.loss import SoftCrossEntropyLoss from metal.modules import IdentityModule -from metal.multitask import ( - MTClassifier, - MultiXYDataset, - MultiYDataset, - TaskGraph, -) +from metal.multitask import MTClassifier, TaskGraph from metal.multitask.mt_em_defaults import mt_em_default_config from metal.utils import recursive_merge_dicts @@ -265,7 +259,7 @@ def forward(self, x): head_outputs[t] = head(task_input) return head_outputs - def _preprocess_Y(self, Y): + def _preprocess_Y(self, Y, k=None): """Convert Y to 
t-length list of soft labels if necessary""" # If not a list, convert to a singleton list if not isinstance(Y, list): @@ -283,18 +277,14 @@ def _preprocess_Y(self, Y): for t, Y_t in enumerate(Y) ] - def _make_data_loader(self, X, Y, data_loader_config): - if isinstance(X, list): - dataset = MultiXYDataset(X, self._preprocess_Y(Y)) - else: - dataset = MultiYDataset(X, self._preprocess_Y(Y)) - data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) - return data_loader - def _get_loss_fn(self): """Returns the loss function to use in the train routine""" + if self.config["use_cuda"]: + criteria = self.criteria.cuda() + else: + criteria = self.criteria loss_fn = lambda X, Y: sum( - self.criteria(Y_tp, Y_t) for Y_tp, Y_t in zip(self.forward(X), Y) + criteria(Y_tp, Y_t) for Y_tp, Y_t in zip(self.forward(X), Y) ) return loss_fn diff --git a/metal/multitask/mt_label_model.py b/metal/multitask/mt_label_model.py index fb38511d..789bbf57 100644 --- a/metal/multitask/mt_label_model.py +++ b/metal/multitask/mt_label_model.py @@ -63,6 +63,9 @@ def _create_L_ind(self, L): if issparse(L[0]): L = [L_t.todense() for L_t in L] + # Make sure converted to numpy here + L = self._to_numpy(L) + L_ind = np.ones((self.n, self.m * self.k)) for yi, y in enumerate(self.task_graph.feasible_set()): for t in range(self.t): diff --git a/metal/multitask/utils.py b/metal/multitask/utils.py index 85ff3379..9bd059e1 100644 --- a/metal/multitask/utils.py +++ b/metal/multitask/utils.py @@ -1,4 +1,5 @@ import numpy as np +from scipy.sparse import issparse from torch.utils.data import Dataset @@ -36,12 +37,20 @@ class MultiXYDataset(Dataset): """ def __init__(self, X, Y): + + # Need to convert sparse matrices to dense here + # TODO: Need to handle sparse matrices better overall; maybe not use + # Datasets for them...? 
+    if issparse(X[0]): +        X = [Xt.toarray() for Xt in X] + +        # Check and set data objects self.X = X self.Y = Y self.t = len(Y) - n = len(X[0]) - assert np.all([len(Y_t) == n for Y_t in Y]) - assert np.all([len(X_t) == n for X_t in X]) + self.n = len(X[0]) + assert np.all([len(X_t) == self.n for X_t in X]) + assert np.all([len(Y_t) == self.n for Y_t in Y]) def __getitem__(self, index): return tuple( @@ -52,4 +61,4 @@ def __getitem__(self, index): ) def __len__(self): - return len(self.X[0]) + return self.n diff --git a/metal/utils.py b/metal/utils.py index d5b6b7c1..6e146f7a 100644 --- a/metal/utils.py +++ b/metal/utils.py @@ -365,3 +365,16 @@ def slice_data(data, indices): return outputs[0] else: return outputs + + +def place_on_gpu(data): +    """Utility to place data on GPU, where data could be a torch.Tensor, a tuple +    or list of Tensors, or a tuple or list of tuple or lists of Tensors""" +    if isinstance(data, (list, tuple)): +        # Tuples are immutable, so build a new list instead of assigning in place +        data = [place_on_gpu(d) for d in data] +        return data +    elif isinstance(data, torch.Tensor): +        return data.cuda() +    else: +        raise ValueError(f"Data type {type(data)} not recognized.") diff --git a/setup.py b/setup.py index 1420423e..55200e28 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ def run(self): "Homepage": "https://hazyresearch.github.io/snorkel/", "Source": "https://github.com/HazyResearch/metal/", "Bug Reports": "https://github.com/HazyResearch/metal/issues", - "Citation": "https://ajratner.github.io/assets/papers/mts-draft.pdf", + "Citation": "https://arxiv.org/abs/1810.02840", }, cmdclass={"clean": CleanCommand}, ) diff --git a/tests/gpu/README.md b/tests/gpu/README.md new file mode 100644 index 00000000..43bfd1f5 --- /dev/null +++ b/tests/gpu/README.md @@ -0,0 +1,7 @@ +### GPU Tests + +**Note that this is not a package** (no `__init__.py` file), so that `nosetests` skips it.
+To run these tests, install the `requirements.txt` and then run (from base directory): +``` +nosetests tests/gpu +``` \ No newline at end of file diff --git a/tests/gpu/requirements.txt b/tests/gpu/requirements.txt new file mode 100644 index 00000000..84af6d2f --- /dev/null +++ b/tests/gpu/requirements.txt @@ -0,0 +1 @@ +GPUtil \ No newline at end of file diff --git a/tests/gpu/test_gpu.py b/tests/gpu/test_gpu.py new file mode 100644 index 00000000..da221b62 --- /dev/null +++ b/tests/gpu/test_gpu.py @@ -0,0 +1,60 @@ +import os +import pickle +import unittest + +import GPUtil + +from metal.end_model import EndModel +from metal.label_model import LabelModel +from metal.utils import split_data + +# Making sure we're using GPU 0 +os.environ["CUDA_VISIBLE_DEVICES"] = "0" + + +class GPUTest(unittest.TestCase): + @unittest.skipIf( + "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", + "Skipping this test on Travis CI.", + ) + def test_gpustorage(self): + # Running basics tutorial problem + with open("tutorials/data/basics_tutorial.pkl", "rb") as f: + X, Y, L, D = pickle.load(f) + + Xs, Ys, Ls, Ds = split_data( + X, Y, L, D, splits=[0.8, 0.1, 0.1], stratify_by=Y, seed=123 + ) + + label_model = LabelModel(k=2, seed=123) + label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=500, print_every=25) + Y_train_ps = label_model.predict_proba(Ls[0]) + + # Creating a really large end model to use lots of memory + end_model = EndModel([1000, 100000, 2], seed=123, use_cuda=True) + + # Getting initial GPU storage use + initial_gpu_mem = GPUtil.getGPUs()[0].memoryUsed + + # Training model + end_model.train( + (Xs[0], Y_train_ps), + dev_data=(Xs[1], Ys[1]), + l2=0.1, + batch_size=256, + n_epochs=3, + print_every=1, + validation_metric="f1", + ) + + # Final GPU storage use + final_gpu_mem = GPUtil.getGPUs()[0].memoryUsed + + # On a Titan X, this model uses ~ 3 GB of memory + gpu_mem_difference = final_gpu_mem - initial_gpu_mem + + self.assertGreater(gpu_mem_difference, 1000) + + +if 
__name__ == "__main__": + unittest.main() diff --git a/tests/metal/end_model/test_end_model.py b/tests/metal/end_model/test_end_model.py index 7b844b1d..ec8ffbb0 100644 --- a/tests/metal/end_model/test_end_model.py +++ b/tests/metal/end_model/test_end_model.py @@ -33,8 +33,8 @@ def setUpClass(cls): def test_logreg(self): em = LogisticRegression(seed=1, input_dim=2, verbose=False) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_softmax(self): @@ -54,8 +54,8 @@ def test_softmax(self): + 1 ) Ys.append(Y) - em.train(Xs[0], Ys[0], Xs[1], Ys[1], lr=0.1, n_epochs=10) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), lr=0.1, n_epochs=10) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_sparselogreg(self): @@ -74,9 +74,9 @@ def test_sparselogreg(self): em = SparseLogisticRegression( seed=1, input_dim=F, padding_idx=0, verbose=False ) - em.train(X, Y, n_epochs=5, optimizer="sgd", lr=0.0005) + em.train((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005) self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0) - score = em.score(X, Y, verbose=False) + score = em.score((X, Y), verbose=False) self.assertGreater(score, 0.95) def test_singletask(self): @@ -89,8 +89,8 @@ def test_singletask(self): verbose=False, ) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_singletask_extras(self): @@ -103,8 +103,8 @@ def test_singletask_extras(self): verbose=False, ) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], 
Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_custom_modules(self): @@ -122,15 +122,13 @@ def test_custom_modules(self): ) Xs, Ys = self.single_problem em.train( - Xs[0], - Ys[0], - Xs[1], - Ys[1], + (Xs[0], Ys[0]), + dev_data=(Xs[1], Ys[1]), n_epochs=5, verbose=False, show_plots=False, ) - score = em.score(Xs[2], Ys[2], verbose=False) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) diff --git a/tests/metal/label_model/test_label_model.py b/tests/metal/label_model/test_label_model.py index f64db9a0..fda5181b 100644 --- a/tests/metal/label_model/test_label_model.py +++ b/tests/metal/label_model/test_label_model.py @@ -3,6 +3,7 @@ import numpy as np +from metal.label_model.baselines import MajorityLabelVoter from metal.label_model.label_model import LabelModel from synthetic.generate import SingleTaskTreeDepsGenerator @@ -35,9 +36,13 @@ def _test_label_model(self, data, test_acc=True): # Test label prediction accuracy if test_acc: - Y_pred = label_model.predict_proba(data.L).argmax(axis=1) + 1 - acc = np.where(data.Y == Y_pred, 1, 0).sum() / data.n - self.assertGreater(acc, 0.95) + score = label_model.score((data.L, data.Y)) + self.assertGreater(score, 0.95) + + # Test against baseline + mv = MajorityLabelVoter() + mv_score = mv.score((data.L, data.Y)) + self.assertGreater(score, mv_score) def test_no_deps(self): for seed in range(self.n_iters): diff --git a/tests/metal/multitask/test_mt_end_model.py b/tests/metal/multitask/test_mt_end_model.py index 756cb576..ab8052d4 100644 --- a/tests/metal/multitask/test_mt_end_model.py +++ b/tests/metal/multitask/test_mt_end_model.py @@ -49,14 +49,12 @@ def test_multitask_top(self): top_layer = len(em.config["layer_out_dims"]) - 1 self.assertEqual(len(em.task_map[top_layer]), em.t) em.train( - self.Xs[0], - self.Ys[0], - 
self.Xs[1], - self.Ys[1], + (self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_custom_attachments(self): @@ -75,14 +73,12 @@ def test_multitask_custom_attachments(self): self.assertEqual(em.task_map[1][0], 0) self.assertEqual(em.task_map[2][0], 1) em.train( - self.Xs[0], - self.Ys[0], - self.Xs[1], - self.Ys[1], + (self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_two_modules(self): @@ -103,9 +99,12 @@ def test_multitask_two_modules(self): for i, X in enumerate(self.Xs): Xs.append([X[:, 0], X[:, 1]]) em.train( - Xs[0], self.Ys[0], Xs[1], self.Ys[1], verbose=False, n_epochs=10 + (Xs[0], self.Ys[0]), + dev_data=(Xs[1], self.Ys[1]), + verbose=False, + n_epochs=10, ) - score = em.score(Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_custom_heads(self): @@ -123,14 +122,12 @@ def test_multitask_custom_heads(self): task_head_layers=[1, 2], ) em.train( - self.Xs[0], - self.Ys[0], - self.Xs[1], - self.Ys[1], + (self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) diff --git a/tests/metal/multitask/test_mt_label_model.py b/tests/metal/multitask/test_mt_label_model.py index e5520946..de84edc5 100644 --- a/tests/metal/multitask/test_mt_label_model.py +++ 
b/tests/metal/multitask/test_mt_label_model.py @@ -33,7 +33,7 @@ def _test_label_model(self, data, test_acc=True): # Test label prediction accuracy if test_acc: - acc = label_model.score(data.L, data.Y) + acc = label_model.score((data.L, data.Y)) self.assertGreater(acc, 0.95) def test_multitask(self): diff --git a/tutorials/Basics.ipynb b/tutorials/Basics.ipynb index 6e8e82b6..962bbdd9 100644 --- a/tutorials/Basics.ipynb +++ b/tutorials/Basics.ipynb @@ -42,6 +42,8 @@ "metadata": {}, "outputs": [], "source": [ + "import sys\n", + "sys.path.append('../../metal')\n", "import metal" ] }, @@ -424,20 +426,36 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 6.036\n", - "[E:250]\tTrain Loss: 0.029\n", - "[E:500]\tTrain Loss: 0.029\n", - "[E:750]\tTrain Loss: 0.029\n", - "[E:999]\tTrain Loss: 0.029\n", + "[E:0]\tTrain Loss: 6.028\n", + "[E:25]\tTrain Loss: 0.438\n", + "[E:50]\tTrain Loss: 0.029\n", + "[E:75]\tTrain Loss: 0.004\n", + "[E:100]\tTrain Loss: 0.003\n", + "[E:125]\tTrain Loss: 0.003\n", + "[E:150]\tTrain Loss: 0.002\n", + "[E:175]\tTrain Loss: 0.002\n", + "[E:200]\tTrain Loss: 0.002\n", + "[E:225]\tTrain Loss: 0.002\n", + "[E:250]\tTrain Loss: 0.002\n", + "[E:275]\tTrain Loss: 0.002\n", + "[E:300]\tTrain Loss: 0.002\n", + "[E:325]\tTrain Loss: 0.002\n", + "[E:350]\tTrain Loss: 0.002\n", + "[E:375]\tTrain Loss: 0.002\n", + "[E:400]\tTrain Loss: 0.002\n", + "[E:425]\tTrain Loss: 0.002\n", + "[E:450]\tTrain Loss: 0.002\n", + "[E:475]\tTrain Loss: 0.002\n", + "[E:499]\tTrain Loss: 0.002\n", "Finished Training\n", - "CPU times: user 995 ms, sys: 23.3 ms, total: 1.02 s\n", - "Wall time: 442 ms\n" + "CPU times: user 817 ms, sys: 26.1 ms, total: 843 ms\n", + "Wall time: 284 ms\n" ] } ], "source": [ "%%time\n", - "label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=1000, print_every=250, lr=0.01, l2=1e-1)" + "label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=500, print_every=25)" ] }, { @@ -456,12 +474,15 @@ "name": "stdout", "output_type": "stream", 
"text": [ - "Accuracy: 0.879\n" + "Accuracy: 0.879\n", + " y=1 y=2 \n", + " l=1 181 56 \n", + " l=2 65 698 \n" ] } ], "source": [ - "score = label_model.score(Ls[1], Ys[1])" + "score = label_model.score((Ls[1], Ys[1]))" ] }, { @@ -480,14 +501,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "Precision: 0.771\n", - "Recall: 0.724\n", - "F1: 0.746\n" + "Precision: 0.764\n", + "Recall: 0.736\n", + "F1: 0.749\n", + " y=1 y=2 \n", + " l=1 181 56 \n", + " l=2 65 698 \n" ] } ], "source": [ - "scores = label_model.score(Ls[1], Ys[1], metric=['precision', 'recall', 'f1'])" + "scores = label_model.score((Ls[1], Ys[1]), metric=['precision', 'recall', 'f1'])" ] }, { @@ -506,10 +530,12 @@ "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.836\n", "Precision: 0.623\n", "Recall: 0.841\n", - "F1: 0.716\n" + "F1: 0.716\n", + " y=1 y=2 \n", + " l=1 207 125 \n", + " l=2 39 629 \n" ] } ], @@ -517,7 +543,7 @@ "from metal.label_model.baselines import MajorityLabelVoter\n", "\n", "mv = MajorityLabelVoter(seed=123)\n", - "scores = mv.score(Ls[1], Ys[1], metric=['accuracy', 'precision', 'recall', 'f1'])" + "scores = mv.score((Ls[1], Ys[1]), metric=['precision', 'recall', 'f1'])" ] }, { @@ -552,13 +578,13 @@ { "data": { "text/plain": [ - "array([[0.32560527, 0.67439473],\n", - " [0.0128121 , 0.9871879 ],\n", - " [0.02633596, 0.97366404],\n", + "array([[0.33879491, 0.66120509],\n", + " [0.01750567, 0.98249433],\n", + " [0.02757502, 0.97242498],\n", " ...,\n", - " [0.7144198 , 0.2855802 ],\n", - " [0.99065254, 0.00934746],\n", - " [0.35757709, 0.64242291]])" + " [0.74142168, 0.25857832],\n", + " [0.98866598, 0.01133402],\n", + " [0.38616893, 0.61383107]])" ] }, "execution_count": 13, @@ -602,7 +628,7 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH8ZJREFUeJzt3XuYVXXd9/H3RwTxlBxEQ5CDRoimIo0KiYfEp1ITtCduLU0yi6fU7KiR93M/Yrem3ZZ22wGjLMFUNMMbPIaJXGYeOWkqEuMBGUEYERAEVOz7/LF/I5thMbNnhjV7w3xe17WvvdZv/dZa3+Ea5+NvHRURmJmZ1bdDuQswM7PK5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4I26ZJmiHpq629blr/aEnzm7t+xvbukzQqTX9Z0iNbcdtnSpq2tbZnbYMDwiqCpFcknVDuOupIGivpPUmr0+efkn4pqXtdn4j4W0T0L3Fbf2ysX0ScGBETtkLtfSSFpB2Ltn1zRHyqpdu2tsUBYbZlt0XE7kAX4DTgw8Cs4pDYGlTg/xat4viX0iqapM6S7pZUK2lFmu5Zr9v+kp6UtErSFElditYfLOlRSSslPS3puKbWEBHvRcRzwOlALfC9tO3jJNUU7esHkl5LI475koZJ+gxwCXC6pDWSnk59Z0i6QtLfgbXAfhmHvCTpF+nnekHSsKIFm4y46o1SHk7fK9M+h9Q/ZCXpE5KeStt+StInipbNkPSfkv6efpZpkvZs6r+bbfscEFbpdgD+APQGegHrgF/W63M28BVgH2ADcB2ApB7APcDlFEYB3wf+LKlbcwqJiPeBKcDR9ZdJ6g9cAByeRh2fBl6JiPuBH1MYjewWEYcWrfYlYDSwO7AwY5dHAi8BewKXApOLw68Bx6TvTmmfj9WrtQuFf5frgK7ANcA9kroWdfsicA6wF9CBwr+dtTEOCKtoEbE8Iv4cEWsjYjVwBXBsvW43RcSzEfE28B/Av0lqB5wF3BsR90bEvyLiAWAmcFILSlpMIWzqex/YCThQUvuIeCUiXmxkWzdGxHMRsSEi3stYvgz4eRrB3AbMB05uQe11TgYWRMRNad+3Ai8ApxT1+UNE/DMi1gG3AwO3wn5tG+OAsIomaRdJv5G0UNJbFA6fdEoBUGdR0fRCoD2F/+vuDYxMh5dWSloJDAVacg6hB/Bm/caIqAa+DYwFlkmaJGmfRra1qJHlr8WmT9NcSGGU1FL7sPmIZSGFn63O60XTa4HdtsJ+bRvjgLBK9z2gP3BkRHyIjYdPVNRn36LpXsB7wBsU/gDfFBGdij67RsRVzSkknUg+Bfhb1vKIuCUihlIIpgB+UrdoC5ts7FHKPSQV/5y9KIxgAN4Gdila9uEmbHdxqrFYL+C1RtazNsYBYZWkvaSORZ8dKRyfX0fhhGsXCsfi6ztL0oGSdgF+BNyRzhf8EThF0qcltUvbPC7jJHeDJLWXNAC4lcIf4msy+vSXdLyknYD1qeb30+KlQJ9mXKm0F3Bh2v9IYABwb1o2FzgjLasCPl+0Xi3wL2C/LWz3XuCjkr4oaUdJpwMHAnc3sT7bzjkgrJLcS+EPa91nLPBzYGcKI4LHgfsz1rsJuJHCYZGOwIUAEbEIGEHhKqJaCiOKiyj99/50SWuAlcBUYDnw8YhYnNF3J+CqVOfrFP64X5KW/Sl9L5c0u8R9AzwB9EvbvAL4fEQsT8v+A9gfWAFcBtxSt1JErE39/54OrQ0u3mjaxmcpjM6WAxcDn42IN5pQm7UB8guDzMwsi0cQZmaWyQFhZmaZHBBmZpYp14CQ9B1Jz0l6VtKt6SqSvpKekLRA0m2SOqS+O6X56rS8T561mZlZw3I7SZ0ec/AIcGBErJN0O4WrVE4CJkfEJEnXA09HxDhJ5wGHRMTXJZ0BnBYRpze0jz333DP69OmTS/1mZturWbNmvRERjT5yZsfGOrTQjsDOkt6
jcFPPEuB4Cs95AZhA4VLGcRQuRxyb2u8AfilJ0UCC9enTh5kzZ+ZTuZnZdkpS1rO/NpPbIaaIeA34KfAqhWBYBcwCVkbEhtStho239/cgPXogLV9F4UFim5A0WtJMSTNra2vzKt/MrM3LLSAkdaYwKuhL4dkvuwInZnStGyGogWUbGyLGR0RVRFR169ash3KamVkJ8jxJfQLwckTUpidVTgY+QeFBa3WHtnqy8dkyNaRn6qTle5DxUDQzM2sdeZ6DeBUYnJ6Psw4YRuFRyw9ReG7MJGAUhefrQ+FRBqOAx9Ly6Q2dfzCz7dd7771HTU0N69evL3cp27SOHTvSs2dP2rdv36z1cwuIiHhC0h3AbAovcZkDjKfwopJJki5PbTekVW4AbpJUTWHkcEZetZlZZaupqWH33XenT58+bPpAWytVRLB8+XJqamro27dvs7aR61VMEXEpmz998yXgiIy+64GRedZjZtuG9evXOxxaSBJdu3alJRfz+E5qM6tIDoeWa+m/oQPCzMwy5X2jnJlZi/UZc89W3d4rVzX8au927dpx8MEHs2HDBgYMGMCECRPYZZddGlxnS2bMmMFPf/pT7r77bqZOncrzzz/PmDFjMvuuXLmSW265hfPOO69J+xg7diy77bYb3//+95tV45Y4IMwq1Nb+o7gt+e3w7rxXs7Js+995552ZO3cuAGeeeSbXX3893/3udz9YHhFEBDvs0LSDMMOHD2f48OFbXL5y5Up+/etfNzkg8uJDTGZmDTj66KOprq7mlVdeYcCAAZx33nkMGjSIRYsWMW3aNIYMGcKgQYMYOXIka9asAeD+++/ngAMOYOjQoUyePPmDbd14441ccMEFACxdupTTTjuNQw89lEMPPZRHH32UMWPG8OKLLzJw4EAuuugiAK6++moOP/xwDjnkEC69dOM1P1dccQX9+/fnhBNOYP78+bn87A4IM7Mt2LBhA/fddx8HH3wwAPPnz+fss89mzpw57Lrrrlx++eX89a9/Zfbs2VRVVXHNNdewfv16vva1r3HXXXfxt7/9jddffz1z2xdeeCHHHnssTz/9NLNnz+aggw7iqquuYv/992fu3LlcffXVTJs2jQULFvDkk08yd+5cZs2axcMPP8ysWbOYNGkSc+bMYfLkyTz11FO5/Pw+xGRmVs+6desYOHAgUBhBnHvuuSxevJjevXszeHDhFd+PP/44zz//PEcddRQA7777LkOGDOGFF16gb9++9OvXD4CzzjqL8ePHb7aP6dOnM3HiRKBwzmOPPfZgxYoVm/SZNm0a06ZN47DDDgNgzZo1LFiwgNWrV3Paaad9cF6kocNWLeGAMLO2Z/GcBhfv3HEn5t77h40NbzwHSxez607tPlg33qjmfw2t4tZfX7nJunOffQG99/bGfbz5EqxfVZhfsRDeri1M/2sDLJ4LO3XYuPLSxbBh/cZ9rF7KD79xJv/nS5/fZB8//+3NSLs284cvnQ8xmZk1w+CPH8Lfn3qa6pdfBWDtunX888WFHPCRPrz86mJefGURALf+z/2Z6w8begTjJv4JgPfff5+3Vq9h9113YfWatz/o8+njhvD726ay5u21ALy2ZBnL3niTYwYP4s4772TdunWsXr2au+66K5ef0SMIM6t4Uy84ikN2eLncZWyiW9fO3HjtWL5w/iW88+67AFx+8fl8dP/ejP+vf+fks7/Fnl06MfSIgTz7QvVm6//3jy5i9MWXc8OkKbTbYQfGXflDhlQdylGHD+Rjx4/kxE9+gqv/4zvMW/AyQ4Z/GYDddtmZP/7icgYdPIDTTz+dgQMH0rt3b44++uhcfsbc3ijXGqqqqsIvDLLtVVu/zHXvXvtt0lZpAVF2+xxWUrd58+YxYMCATdokzYqIqsbW9SEmMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTL4Pwswq3iG/6711Nzh6RqNdltYu5ztjf8bjs/9B5z12p0P79lx83ihOO/H4zP4
zHp3JT6+fyN0Tr9tsWZ8jT2bmfX9kzy6dW1h468ptBCGpv6S5RZ+3JH1bUhdJD0hakL47p/6SdJ2kaknPSBqUV21mZg2JCE79ync55shBvPTYXcy6/xYmjbuSmiXLyl1aq8ptBBER84GBAJLaAa8BdwJjgAcj4ipJY9L8D4ATgX7pcyQwLn2bmbWq6Y88SYcO7fn62RufgdS75z588ytnsH79O3zjhz9m5jPz2LFdO6659Lt88qjDN1l/+Zsr+cL5l1C7fAVHDDyIbfWG5NY6BzEMeDEiFgIjgAmpfQJwapoeAUyMgseBTpK6t1J9ZmYfeO6fLzHoYwdkLvvVjbcD8I8Hb+fWX/+YUd++lPXr39mkz2XXjmfoEQOZM+1Whn/qWF59LfuR35Wutc5BnAHcmqb3joglABGxRNJeqb0HsKhonZrUtqR4Q5JGA6MBevXqlWfNZmYAnH/JlTzy5Fw6dGhPz+57881zTgfggI/0pXfPD/PPlxZu0v/hx2cz+Xc/BeDkE46mc6cPtXrNW0PuIwhJHYDhwJ8a65rRttm4LCLGR0RVRFR169Zta5RoZraJgz66H7OffeGD+V/9+Ic8ePv11C5fUfLhIinrT9q2pTUOMZ0IzI6IpWl+ad2ho/Rdd9anBti3aL2ewOJWqM/MbBPHDz2C9e+8y7gJG/+/du269QAcc+Qgbr7zPgD++eJCXn3tdfrv32eT9Y8ZPIibJxf63Df976xY+VbrFL6VtcYhpi+w8fASwFRgFHBV+p5S1H6BpEkUTk6vqjsUZWZt2zNfXdiqT3OVxP/c8DO+M/Zn/Ne4CXTr2pldd96Zn1xyISM+fRxfH/NjDh72b+zYrh03XnsZOxW/9Ae49Duj+cL5lzDo01/k2MGD6NXjw61W+9aU6+O+Je1C4bzCfhGxKrV1BW4HegGvAiMj4k0VxmO/BD4DrAXOiYgGn+Xtx33b9syP+/bjvhvUCo/7znUEERFrga712pZTuKqpft8Azs+zHjMzK50ftWFmZpkcEGZWcYLYZm8uqyQt/Td0QJhZxVm48j02rH3LIdECEcHy5cvp2LFjs7fhh/WZWcX5xRMr+CbQu9MbKN0iNU+15S2q0qya12iXjh070rNnz2bvwgFhZhXnrXf+xRUPL9+k7ZWOXyxTNRVq7Krcd+FDTGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlinXgJDUSdIdkl6QNE/SEEldJD0gaUH67pz6StJ1kqolPSNpUJ61mZlZw/IeQfw3cH9EHAAcCswDxgAPRkQ/4ME0D3Ai0C99RgPjcq7NzMwakFtASPoQcAxwA0BEvBsRK4ERwITUbQJwapoeAUyMgseBTpK651WfmZk1LM8RxH5ALfAHSXMk/U7SrsDeEbEEIH3vlfr3ABYVrV+T2jYhabSkmZJm1tb6DVNmZnnJMyB2BAYB4yLiMOBtNh5OyqKMts1eSBsR4yOiKiKqunXrtnUqNTOzzeQZEDVATUQ8kebvoBAYS+sOHaXvZUX99y1avyewOMf6zMysAbkFRES8DiyS1D81DQOeB6YCo1LbKGBKmp4KnJ2uZhoMrKo7FGVmZq1vx5y3/03gZkkdgJeAcyiE0u2SzgVeBUamvvcCJwHVwNrU18zMyiTXgIiIuUBVxqJhGX0DOD/PeszMrHS+k9rMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjUaEJJ2lbRDmv6opOGS2udfmpmZlVMpI4iHgY6SegAPUnjT2415FmVmZuVXSkAoItYCnwN+ERGnAQfmW5aZmZVbSQEhaQhwJnBPaivpVaWSXpH0D0lzJc1MbV0kPSBpQfruXLcTSddJqpb0jKRBzfmBzMxs6yglIL4
F/BC4MyKek7Qf8FAT9vHJiBgYEXXvph4DPBgR/SgcshqT2k8E+qXPaGBcE/ZhZmZbWaMjgYh4mMJ5iLr5l4ALW7DPEcBxaXoCMAP4QWqfGBEBPC6pk6TuEbGkBfsyM7NmajQgJH0U+D7Qp7h/RBxfwvYDmCYpgN9ExHhg77o/+hGxRNJeqW8PYFHRujWpbZOAkDSawgiDXr16lVCCmZk1RynnEv4EXA/8Dni/ids/KiIWpxB4QNILDfRVRlts1lAImfEAVVVVmy03M7Oto5SA2BARzTofEBGL0/cySXcCRwBL6w4dSeoOLEvda4B9i1bvCSxuzn7NzKzlSjlJfZek8yR1T1cgdZHUpbGV0g12u9dNA58CngWmAqNSt1HAlDQ9FTg7Xc00GFjl8w9mZuVTygii7o/5RUVtAezXyHp7A3dKqtvPLRFxv6SngNslnQu8CoxM/e8FTgKqgbUUbsgzM7MyKeUqpr7N2XC62unQjPblwLCM9gDOb86+zMxs6yvlKqb2wDeAY1LTDApXJL2XY11mZlZmpRxiGge0B36d5r+U2r6aV1FmZlZ+pQTE4RFRfKhouqSn8yrIzMwqQylXMb0vaf+6mfSojabeD2FmZtuYUkYQFwEPSXqJws1svfEVRmZm271SrmJ6UFI/oD+FgHghIt7JvTIzMyurLQaEpOMjYrqkz9VbtL8kImJyzrWZmVkZNTSCOBaYDpySsSwAB4SZ2XZsiwEREZemyR9FxMvFyyQ16+Y5MzPbdpRyFdOfM9ru2NqFmJlZZWnoHMQBwEHAHvXOQ3wI6Jh3YWZmVl4NnYPoD3wW6MSm5yFWA1/LsygzMyu/hs5BTAGmSBoSEY+1Yk1mZlYBSjkH8XVJnepmJHWW9PscazIzswpQSkAcEhEr62YiYgVwWH4lmZlZJSglIHaQ1LluJr1NrpRHdJiZ2TaslD/0PwMelVR3aetI4Ir8SjIzs0pQyrOYJkqaCRxP4VlMn4uI53OvzMzMymqLh5gkfSh9dwFeB24BbgZeT20lkdRO0hxJd6f5vpKekLRA0m2SOqT2ndJ8dVrep/k/lpmZtVRD5yBuSd+zgJlFn7r5Un0LmFc0/xPg2ojoB6wAzk3t5wIrIuIjwLWpn5mZlckWAyIiPpu++0bEfkWfvhGxXykbl9QTOBn4XZoXhUNVdeczJgCnpukRaZ60fFjqb2ZmZdDQozYGNbRiRMwuYfs/By4Gdk/zXYGVEbEhzdcAPdJ0D2BR2vYGSatS/zfq1TUaGA3Qq1evEkowM7PmaOgk9c/Sd0egCniawknqQ4AngKENbVjSZ4FlETFL0nF1zRldo4RlGxsixgPjAaqqqjZbbmZmW0dDh5g+GRGfBBYCgyKiKiI+TuEmueoStn0UMFzSK8AkCoeWfg50klQXTD2BxWm6BtgXIC3fA3izyT+RmZltFaXcKHdARPyjbiYingUGNrZSRPwwInpGRB/gDGB6RJwJPAR8PnUbBUxJ01PTPGn59IjwCMHMrExKuVFunqTfAX+kcMjnLDa9KqmpfgBMknQ5MAe4IbXfANwkqZrCyOGMFuzDzMxaqJSAOAf4BoXLVQEeBsY1ZScRMQOYkaZfAo7I6LOewl3aZmZWAUq5k3q9pOuBeyNifivUZGZmFaDRcxCShgNzgfvT/EBJU/MuzMzMyquUk9SXUjgktBIgIuYCfXKsyczMKkApAbEhIlblXomZmVWUUk5SPyvpi0A7Sf2AC4FH8y3LzMzKrZQRxDeBg4B3KDzAbxXw7TyLMjOz8mtwBCGpHXBZRFwE/HvrlGRmZpWgwRFERLwPfLyVajEzswpSyjmIOemy1j8Bb9c1RsTk3KoyM7OyKyUgugDLKTxsr04ADggzs+1YKQFxUUS80Xg3MzPbnjT0TupTJNUCz0iqkfSJVqzLzMzKrKGT1FcAR0fEPsD/Bq5snZLMzKwSNBQQGyLiBYCIeIKNrw01M7M2oKFzEHtJ+u6W5iPimvzKMjOzcms
oIH7LpqOG+vNmZrYd22JARMRlrVmImZlVllKexWRmZm1QbgEhqaOkJyU9Lek5SZel9r6SnpC0QNJtkjqk9p3SfHVa3iev2szMrHF5jiDeAY6PiEOBgcBnJA0GfgJcGxH9gBXAuan/ucCKiPgIcG3qZ2ZmZVLKK0f/b9H0TqVuOArWpNn26RMUHtlxR2qfAJyapkekedLyYZJU6v7MzGzrauhO6oslDQE+X9T8WFM2LqmdpLnAMuAB4EVgZURsSF1qgB5pugewCCAtXwV0zdjmaEkzJc2sra1tSjlmZtYEDY0g5gMjgf0k/U3SeKCrpP6lbjwi3o+IgUBPCu+1HpDVLX1njRZis4aI8RFRFRFV3bp1K7UUMzNrooYCYgVwCVANHAdcl9rHSGrSK0cjYiUwAxgMdJJUd3ltT2Bxmq4B9gVIy/cA3mzKfszMbOtpKCA+A9wD7A9cQ2EE8HZEnBMRjT64T1I3SZ3S9M7ACcA84CE2HrYaBUxJ01PTPGn59IjYbARhZmato6Eb5S4BkPQ08EfgMKCbpEcoXG10SiPb7g5MSK8t3QG4PSLulvQ8MEnS5cAc4IbU/wbgJknVFEYOZ7Tg5zIzsxYq5X0Qf4mIp4CnJH0jIoZK2rOxlSLiGQqhUr/9JQqjkfrt6ymc8zAzswrQ6GWuEXFx0eyXU5tfIGRmtp1r0o1yEfF0XoWYmVll8bOYzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwsUyl3UltbMHaPcldQWcauKncFZmXnEYSZmWVyQJiZWSYHhJmZZWqz5yD6jLmn3CVUlFc6lrsCM6s0HkGYmVkmB4SZmWVyQJiZWSYHhJmZZcotICTtK+khSfMkPSfpW6m9i6QHJC1I351TuyRdJ6la0jOSBuVVm5mZNS7PEcQG4HsRMQAYDJwv6UBgDPBgRPQDHkzzACcC/dJnNDAux9rMzKwRuQVERCyJiNlpejUwD+gBjAAmpG4TgFPT9AhgYhQ8DnSS1D2v+szMrGGtcg5CUh/gMOAJYO+IWAKFEAH2St16AIuKVqtJbfW3NVrSTEkza2tr8yzbzKxNyz0gJO0G/Bn4dkS81VDXjLbYrCFifERURURVt27dtlaZZmZWT64BIak9hXC4OSImp+aldYeO0vey1F4D7Fu0ek9gcZ71mZnZluV5FZOAG4B5EXFN0aKpwKg0PQqYUtR+drqaaTCwqu5QlJmZtb48n8V0FPAl4B+S5qa2S4CrgNslnQu8CoxMy+4FTgKqgbXAOTnWZmZmjcgtICLiEbLPKwAMy+gfwPl51WNmZk3jO6nNzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwy5RYQkn4vaZmkZ4vaukh6QNKC9N05tUvSdZKqJT0jaVBedZmZWWnyHEHcCHymXtsY4MGI6Ac8mOYBTgT6pc9oYFyOdZmZWQlyC4iIeBh4s17zCGBCmp4AnFrUPjEKHgc6SeqeV21mZta41j4HsXdELAFI33ul9h7AoqJ+NaltM5JGS5opaWZtbW2uxZqZtWWVcpJaGW2R1TEixkdEVURUdevWLeeyzMzartYOiKV1h47S97LUXgPsW9SvJ7C4lWszM7MirR0QU4FRaXoUMKWo/ex0NdNgYFXdoSgzMyuPHfPasKRbgeOAPSXVAJcCVwG3SzoXeBUYmbrfC5wEVANrgXPyqsvMzEqTW0BExBe2sGhYRt8Azs+rFjMza7pKOUltZmYVxgFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmSoqICR9RtJ8SdWSxpS7HjOztqxiAkJSO+B
XwInAgcAXJB1Y3qrMzNquigkI4AigOiJeioh3gUnAiDLXZGbWZu1Y7gKK9AAWFc3XAEfW7yRpNDA6za6RNL8VatvuCfYE3ih3HRXjMpW7AqvHv6P1tOx3tHcpnSopILJ+2tisIWI8MD7/ctoWSTMjoqrcdZhtiX9HW18lHWKqAfYtmu8JLC5TLWZmbV4lBcRTQD9JfSV1AM4Appa5JjOzNqtiDjFFxAZJFwB/AdoBv4+I58pcVlviw3ZW6fw72soUsdlhfjMzs4o6xGRmZhXEAWFmZpkcEG2cpN9LWibp2XLXYpZF0r6SHpI0T9Jzkr5V7praCp+DaOMkHQOsASZGxMfKXY9ZfZK6A90jYrak3YFZwKkR8XyZS9vueQTRxkXEw8Cb5a7DbEsiYklEzE7Tq4F5FJ68YDlzQJjZNkNSH+Aw4InyVtI2OCDMbJsgaTfgz8C3I+KtctfTFjggzKziSWpPIRxujojJ5a6nrXBAmFlFkyTgBmBeRFxT7nraEgdEGyfpVuAxoL+kGknnlrsms3qOAr4EHC9pbvqcVO6i2gJf5mpmZpk8gjAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDgizEkha04S+YyV9P6/tm7UWB4SZmWVyQJg1k6RTJD0haY6kv0rau2jxoZKmS1og6WtF61wk6SlJz0i6rAxlm5XMAWHWfI8AgyPiMGAScHHRskOAk4EhwP+TtI+kTwH9gCOAgcDH0/s4zCrSjuUuwGwb1hO4Lb3QpgPwctGyKRGxDlgn6SEKoTAU+BQwJ/XZjUJgPNx6JZuVzgFh1ny/AK6JiKmSjgPGFi2r/wybAARcGRG/aZ3yzFrGh5jMmm8P4LU0PareshGSOkrqChwHPAX8BfhKeq8BknpI2qu1ijVrKo8gzEqzi6SaovlrKIwY/iTpNeBxoG/R8ieBe4BewH9GxGJgsaQBwGOFJ1izBjgLWJZ/+WZN56e5mplZJh9iMjOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTP8fuE1itErzFA4AAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH2NJREFUeJzt3XucVWW9x/HPVwQBb1xEQ+4qIVaKOCrkJRO7qAlakdpFMopTalaWhp5TYmnZTctKjLTEu2YaZGZ6UI6VVxQ0FdFRQUYURgQEAQP6nT/2M7IZFjN7mFmzN8z3/Xrt117rWc9a67fnNcyXZ922IgIzM7P6til3AWZmVpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFbNEnTJX2xtddN6x8mac7mrp+xvb9KGpOmPy/pHy247c9IurultmdtgwPCKoKkuZKOKncddSRNkLRG0vL0ek7SryT1rOsTEX+PiEElbuu6xvpFxNERMbkFau8vKSRtW7Tt6yPiw83dtrUtDgizTbs5InYEugEnAO8CHisOiZagAv9btIrjX0qraJK6SrpDUq2kJWm6d71ue0p6RNIySVMkdStaf5ikByQtlfSEpCOaWkNErImIp4ETgVrgm2nbR0iqKdrXtyW9kkYccySNkPRR4DzgREkrJD2R+k6XdJGkfwIrgT0yDnlJ0i/T53pW0oiiBRuMuOqNUu5P70vTPofXP2Ql6f2SHk3bflTS+4uWTZf0fUn/TJ/lbkm7NPXnZls+B4RVum2A3wP9gL7AKuBX9fqcAnwB2B1YC1wGIKkX8BfgQgqjgG8Bf5TUY3MKiYh1wBTgsPrLJA0CzgAOTKOOjwBzI+Iu4AcURiM7RMR+Rat9DhgH7AjMy9jlwcCLwC7A+cBtxeHXgMPTe5e0zwfr1dqNws/lMqA7cAnwF0ndi7p9GjgV2BXoQOFnZ22MA8IqWkQsjog/RsTKiFgOXAR8oF63ayPiqYh4C/gO8ClJ7YDPAndGxJ0R8Z+IuAeYARzTjJIWUAib+tYB2wH7SGofEXMj4oVGtnV1RDwdEWsjYk3G8kXAz9MI5mZgDnBsM2qvcyzwfERcm/Z9I/AscFxRn99HxHMRsQq4BRjSAvu1LYwDwiqapM6SfiNpnqQ3KRw+6ZICoM78oul5QHsK/+vuB4xOh5eWSloKHAo05xxCL+CN+o0RUQ18HZgALJJ0k6TdG9nW/EaWvxIbPk1zHoVRUnPtzsYjlnkUPlud14qmVwI7tMB+bQvjgLBK901gEHBwROzE+sMnKurTp2i6L7AGeJ3CH+BrI6JL0Wv7iLh4cwpJJ5KPA/6etTwiboiIQykEUwA/qlu0iU029ijlXpKKP2dfCiMYgLeAzkXL3tWE7S5INRbrC7zSyHrWxjggrJK0l9Sx6LUthePzqyiccO1G4Vh8fZ+VtI+kzsD3gFvT+YLrgOMkfURSu7TNIzJOcjdIUntJg4EbKfwhviSjzyBJR0raDlidal6XFi8E+m/GlUq7Amem/Y8GBgN3pmWzgJPSsirgk0Xr1QL/AfbYxHbvBN4t6dOStpV0IrAPcEcT67OtnAPCKsmdFP6w1r0mAD8HOlEYETwE3JWx3rXA1RQOi3QEzgSIiPnAKApXEdVSGFGcTem/9ydKWgEsBaYCi4EDImJBRt/tgItTna9R+ON+Xlr2h/S+WNLjJe4b4GFgYNrmRcAnI2JxWvYdYE9gCXABcEPdShGxMvX/Zzq0Nqx4o2kbH6MwOlsMnAN8LCJeb0Jt1gbIXxhkZmZZPIIwM7NMDggzM8uUa0BI+oakpyU9JenGdJJwgKSHJT0v6WZJHVLf7dJ8dVreP8/azMysYbkFRLqL9UygKiLeC7QDTqJw6d+lETGQwgm2sWmVscCSiNgLuJT1lwiamVkZbNt4l2Zvv5OkNRSu2X4VOJLCbfwAkylcqTKRwtUmE1L7rcCvJCkaOIu+yy67RP/+/XM
p3Mxsa/XYY4+9HhGNPnImt4CIiFck/RR4mcIli3cDjwFLI2Jt6lbD+rs3e5HuLI2ItZKWUXhOzAaX3kkaR+H5NfTt25cZM2bk9RHMzLZKkrKe/bWRPA8xdaUwKhhA4db+7YGjM7rWjRDUwLL1DRGTIqIqIqp69NisZ66ZmVkJ8jxJfRTwUkTUpgeR3Qa8n8JzdOpGLr1Z/+iAGtIjE9Lyncl45o2ZmbWOPAPiZWBYetiagBHAM8B9rH8swBgKj0+Gwp2qY9L0J4F7Gzr/YGZm+crzHMTDkm4FHqfwjP6ZwCQKz6G/SdKFqe2qtMpVwLWSqimMHE7KqzYzq2xr1qyhpqaG1atXl7uULVrHjh3p3bs37du336z1t+hHbVRVVYVPUpttfV566SV23HFHunfvzoYPtLVSRQSLFy9m+fLlDBgwYINlkh6LiKrGtuE7qc2s4qxevdrh0EyS6N69e7NGYQ4IM6tIDofma+7P0AFhZmaZ8r6T2sys2fqP/0uLbm/uxQ1/tXe7du143/vex9q1axk8eDCTJ0+mc+fODa6zKdOnT+enP/0pd9xxB1OnTuWZZ55h/PjxmX2XLl3KDTfcwGmnndakfUyYMIEddtiBb33rW5tV46Y4IMwqVEv/UdyS/HZkT9bULC3b/jt16sSsWbMA+MxnPsMVV1zBWWed9c7yiCAi2Gabph2EGTlyJCNHjtzk8qVLl3L55Zc3OSDy4kNMZmYNOOyww6iurmbu3LkMHjyY0047jaFDhzJ//nzuvvtuhg8fztChQxk9ejQrVqwA4K677mLvvffm0EMP5bbbbntnW1dffTVnnHEGAAsXLuSEE05gv/32Y7/99uOBBx5g/PjxvPDCCwwZMoSzzz4bgJ/85CcceOCB7Lvvvpx//vpv3L3ooosYNGgQRx11FHPmzMnls3sEYWZtz4KZDS+P/8CCmaxdu5a/3n4THz3i/bDwaebMmcPvfzyey//nS7z+xgtc+N3x/O91v2L7zp340a+v5pILzuacr4zhS1/4PPfe8hv2GtCHE788HlavKuxzyTx4qxYWzOTML3+bDxywL7f/+rusW7eOFW+t5OJvfI6nZs1g1p2/B+DuGy/n+Sce5JE//YaIYOTnv879t/Vh+86duOmmm5g5s1Dj0KFDOeCAA1r8x+SAMDOrZ9XqtxnyocK9uocdvD9jTz6eBQtr6de7J8MO2BeAhx77F8889xKHjDoVgH+vWcPwA/bl2eq5DOi7OwP36AvAZz9xDJOu++NG+7j3n49yzS++DxTOeey8044sWbZ8gz53/99D3P1/D7H/h08GYMXKlTz/0nyWr3iLE0444Z3zIg0dtmoOB4SZWT2dOm7HrHtu2qh9+86d3pmOCD50+MHcePkPN+gz66k5LXaJbkRw7hmn8l+f++QG7T//7fWtchmwz0GYmW2GYQfsyz8ffYLql14GYOWqVTz3wjz23qs/L728gBfmzgfgxj/dlbn+iEMPYuI1fwBg3bp1vLl8BTtu35nlK956p89HjhjO726eyoq3VgLwyquLWPT6Gxw+bCi33347q1atYvny5fz5z3/O5TN6BGFmFW/qGYew7zYvlbuMDfTo3pWrL53Ayaefx9v//jcAF55zOu/esx+TfvzfHHvK19ilWxcOPWgITz1bvdH6v/je2Yw750KuumkK7bbZhok/PJfhVftxyIFDeO+Rozn6g+/nJ9/5BrOff4nhIz8PwA6dO3HdLy9k6PsGc+KJJzJkyBD69evHYYcdlstn9LOYzCpUW7/Mdbe+e2zQVmkBUXa7719St9mzZzN48OAN2vwsJjMzaxYHhJmZZXJAmJlZJgeEmZllckCYmVkmB4SZmWXK7T4ISYOAm4ua9gC+C1yT2vsDc4FPRcQSFW4L/AVwDLAS+HxEPJ5XfWa25dj3yn4tu8Fx0xvtsrB2Md+Y8DMeevxfdN15Rzq0b885p43hhKOPzOw//YEZ/PSKa7jjmss2Wtb/4GOZ8dfr2KV
b12YW3rpyG0FExJyIGBIRQ4ADKPzRvx0YD0yLiIHAtDQPcDQwML3GARPzqs3MrCERwfFfOIvDDx7Kiw/+mcfuuoGbJv6QmlcXlbu0VtVad1KPAF6IiHmSRgFHpPbJwHTg28Ao4Joo3Ln3kKQuknpGxKutVKOZGQD3/uMROnRoz5dPWf8MpH69d+erXziJ1avf5ivn/oAZT85m23btuOT8s/jgIQdusP7iN5Zy8unnUbt4CQcNeQ9b6g3JrXUO4iTgxjS9W90f/fS+a2rvBcwvWqcmtW1A0jhJMyTNqK2tzbFkM2urnn7uRYa+d+/MZb+++hYA/jXtFm68/AeM+fr5rF799gZ9Lrh0EoceNISZd9/IyA9/gJdfeS33mvOQ+whCUgdgJHBuY10z2jaK3YiYBEyCwqM2ml2gmVkjTj/vh/zjkVl06NCe3j1346unngjA3nsNoF/vd/Hci/M26H//Q49z25U/BeDYow6ja5edWr3mltAaI4ijgccjYmGaXyipJ0B6rzuoVwP0KVqvN7CgFeozM9vAe969B48/9ew787/+wblMu+UKahcvKflwUWs8jjtvrREQJ7P+8BLAVGBMmh4DTClqP0UFw4BlPv9gZuVw5KEHsfrtfzNx8h/eaVu5ajUAhx88lOtv/ysAz70wj5dfeY1Be/bfYP3Dhw3l+tsKff567z9ZsvTN1im8heV6iElSZ+BDwH8VNV8M3CJpLPAyMDq130nhEtdqClc8nZpnbWa25Xjyi/Na9WmukvjTVT/jGxN+xo8nTqZH965s36kTPzrvTEZ95Ai+PP4HvG/Ep9i2XTuuvvQCttuuwwbrn/+NcZx8+nkM/cin+cCwofTt9a5Wq70l+XHfZhXKj/v2474b5Md9m5lZuTggzMwskwPCzCpOEFvszWWVpLk/QweEmVWceUvXsHblmw6JZogIFi9eTMeOHTd7G631qA0zs5L98uElfBXo1+V1lO6hnS0/OWEDy2Y32qVjx4707t17s3fhgDCzivPm2//hovsXb9A2t+Ony1RNhZqwLPdd+BCTmZllckCYmVkmB4SZmWVyQJiZWSYHhJmZZXJAmJlZJgeEmZllckCYmVkmB4SZmWVyQJiZWSYHhJmZZXJAmJlZplwDQlIXSbdKelbSbEnDJXWTdI+k59N719RXki6TVC3pSUlD86zNzMwalvcI4hfAXRGxN7AfMBsYD0yLiIHAtDQPcDQwML3GARNzrs3MzBqQW0BI2gk4HLgKICL+HRFLgVHA5NRtMnB8mh4FXBMFDwFdJPXMqz4zM2tYniOIPYBa4PeSZkq6UtL2wG4R8SpAet819e8FzC9avya1bUDSOEkzJM2orfUXiJiZ5SXPgNgWGApMjIj9gbdYfzgpizLaNvq+wYiYFBFVEVHVo0ePlqnUzMw2kmdA1AA1EfFwmr+VQmAsrDt0lN4XFfXvU7R+b2BBjvWZmVkDcguIiHgNmC9pUGoaATwDTAXGpLYxwJQ0PRU4JV3NNAxYVncoyszMWl/e30n9VeB6SR2AF4FTKYTSLZLGAi8Do1PfO4FjgGpgZeprZmZlkmtARMQsoCpj0YiMvgGcnmc9ZmZWOt9JbWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpap0YCQtL2kbdL0uyWNlNQ+/9LMzKycShlB3A90lNQLmEbhq0CvzrMoMzMrv1ICQhGxEvg48MuIOAHYp5SNS5or6V+SZkmakdq6SbpH0vPpvWtql6TLJFVLelLS0M39UGZm1nwlBYSk4cBngL+ktqZ8l/UHI2JIRNR9N/V4YFpEDKQwIhmf2o8GBqbXOGBiE/ZhZmYtrJSA+BpwLnB7RDwtaQ/gvmbscxQwOU1PBo4var8mCh4Cukjq2Yz9mJlZMzQ6EoiI+ymch6ibfxE4s8TtB3C3pAB
+ExGTgN0i4tW0rVcl7Zr69gLmF61bk9peLd6gpHEURhj07du3xDLMzKypGg0ISe8GvgX0L+4fEUeWsP1DImJBCoF7JD3b0K4y2mKjhkLITAKoqqraaLmZmbWMUs4l/AG4ArgSWNeUjUfEgvS+SNLtwEHAQkk90+ihJ7Aoda8B+hSt3htY0JT9mZlZyynlHMTaiJgYEY9ExGN1r8ZWSvdP7Fg3DXwYeAqYCoxJ3cYAU9L0VOCUdDXTMGBZ3aEoMzNrfaWMIP4s6TTgduDtusaIeKOR9XYDbpdUt58bIuIuSY8Ct0gaC7wMjE797wSOAaqBlRTutzAzszIpJSDq/rd/dlFbAHs0tFI6mb1fRvtiYERGewCnl1CPmZm1glKuYhrQGoWYmVllKeUqpvbAV4DDU9N0CpesrsmxLjMzK7NSDjFNBNoDl6f5z6W2L+ZVlJmZlV8pAXFgRBSfS7hX0hN5FWRmZpWhlMtc10nas24mPWqjSfdDmJnZlqeUEcTZwH2SXqRwt3M/fAmqmdlWr5SrmKZJGggMohAQz0bE242sZmZmW7hNBoSkIyPiXkkfr7doT0lExG0512ZmZmXU0AjiA8C9wHEZywJwQJiZbcU2GRARcX6a/F5EvFS8TJJvnjMz28qVchXTHzPabm3pQszMrLI0dA5ib+A9wM71zkPsBHTMuzAzMyuvhs5BDAI+BnRhw/MQy4Ev5VmUmZmVX0PnIKYAUyQNj4gHW7EmMzOrAKWcg/iypC51M5K6SvpdjjWZmVkFKCUg9o2IpXUzEbEE2D+/kszMrBKUEhDbSOpaNyOpG6U9osPMzLZgpfyh/xnwgKS6S1tHAxflV5KZmVWCRkcQEXEN8AlgIbAI+HhEXFvqDiS1kzRT0h1pfoCkhyU9L+lmSR1S+3Zpvjot7785H8jMzFrGJgNC0k7pvRvwGnADcD3wWmor1deA2UXzPwIujYiBwBJgbGofCyyJiL2AS1M/MzMrk4ZGEDek98eAGUWvuvlGSeoNHAtcmeYFHMn6O7EnA8en6VFpnrR8ROpvZmZl0NB9EB9L78157tLPgXOAHdN8d2BpRKxN8zVArzTdC5if9rlW0rLU//XiDUoaB4wD6Nu3bzNKMzOzhjT0qI2hDa0YEY83tFzSx4BFEfGYpCPqmrM2VcKy4v1OAiYBVFVVbbTczMxaRkNXMf0svXcEqoAnKPwR3xd4GDi0kW0fAoyUdEzaxk4URhRdJG2bRhG9gQWpfw3QB6iRtC2wM/BGkz+RmZm1iE2eg4iID0bEB4F5wNCIqIqIAyjcJFfd2IYj4tyI6B0R/YGTgHsj4jPAfcAnU7cxwJQ0PTXNk5bfGxEeIZiZlUkpN8rtHRH/qpuJiKeAIc3Y57eBsyRVUzjHcFVqvwrontrPAsY3Yx9mZtZMpdwoN1vSlcB1FM4JfJYNL1ttVERMB6an6ReBgzL6rKZwE56ZmVWAUgLiVOArFO5nALgfmJhbRWZmVhEaDYiIWC3pCuDOiJjTCjWZmVkFaPQchKSRwCzgrjQ/RNLUvAszM7PyKuUk9fkUzhksBYiIWUD/HGsyM7MKUEpArI2IZblXYmZmFaWUk9RPSfo00E7SQOBM4IF8yzIzs3IrZQTxVeA9wNsUHuC3DPh6nkWZmVn5NTiCkNQOuCAizgb+u3VKMjOzStDgCCIi1gEHtFItZmZWQUo5BzEzXdb6B+CtusaIuC23qszMrOxKCYhuwGIKX/RTJwAHhJnZVqyUgDg7Il5vvJuZmW1NGvpO6uMk1QJPSqqR9P5WrMvMzMqsoZPUFwGHRcTuwCeAH7ZOSWZmVgkaCoi1EfEsQEQ8zPrvlTYzszagoXMQu0o6a1PzEXFJfmWZmVm5NRQQv2XDUUP9eTMz24ptMiAi4oLWLMTMzCpLKc9i2iySOkp6RNITkp6WdEFqHyDpYUnPS7pZUofUvl2ar07L++dVm5mZNS63gKDwcL8jI2I/YAjwUUnDgB8Bl0bEQGAJMDb
1HwssiYi9gEtTPzMzK5PcAiIKVqTZ9ukVFO7IvjW1TwaOT9Oj0jxp+QhJyqs+MzNrWClfOfo/RdPbNWXjktpJmgUsAu4BXgCWRsTa1KUG6JWmewHzAdLyZUD3jG2OkzRD0oza2tqmlGNmZk3Q0J3U50gaDnyyqPnBpmw8ItZFxBCgN4WvLR2c1a1ulw0sK97mpIioioiqHj16NKUcMzNrgoZGEHOA0cAekv4uaRLQXdKgpu4kIpYC04FhQBdJdVdP9QYWpOkaoA9AWr4z8EZT92VmZi2joYBYApwHVANHAJel9vGSGv3KUUk9JHVJ052Ao4DZwH2sH5WMAaak6alpnrT83ojYaARhZmato6Eb5T4KnA/sCVwCPAG8FRGnlrjtnsDk9K102wC3RMQdkp4BbpJ0ITATuCr1vwq4VlI1hZHDSU3+NGZm1mIaulHuPABJTwDXAfsDPST9g8LlqMc1tOGIeDKtU7/9RQrnI+q3r6ZwSMvMzCpAKd8H8beIeBR4VNJXIuJQSbvkXZiZmZVXo5e5RsQ5RbOfT23+AiEzs61ck26Ui4gn8irEzMwqS56P2jAzsy2YA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwylXKjnLUFE3YudwWVZcKycldgVnYeQZiZWaY2O4LoP/4v5S6hosztWO4KzKzSeARhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWKbeAkNRH0n2SZkt6WtLXUns3SfdIej69d03tknSZpGpJT0oamldtZmbWuDxHEGuBb0bEYGAYcLqkfYDxwLSIGAhMS/MARwMD02scMDHH2szMrBG5BUREvBoRj6fp5cBsoBcwCpicuk0Gjk/To4BrouAhoIuknnnVZ2ZmDWuVcxCS+gP7Aw8Du0XEq1AIEWDX1K0XML9otZrUZmZmZZB7QEjaAfgj8PWIeLOhrhltkbG9cZJmSJpRW1vbUmWamVk9uQaEpPYUwuH6iLgtNS+sO3SU3hel9hqgT9HqvYEF9bcZEZMioioiqnr06JFf8WZmbVyeVzEJuAqYHRGXFC2aCoxJ02OAKUXtp6SrmYYBy+oORZmZWevL82muhwCfA/4laVZqOw+4GLhF0ljgZWB0WnYncAxQDawETs2xNjMza0RuARER/yD7vALAiIz+AZyeVz1mZtY0vpPazMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwsU24BIel3khZJeqqorZukeyQ9n967pnZJukxStaQnJQ3Nqy4zMytNniOIq4GP1msbD0yLiIHAtDQPcDQwML3GARNzrMvMzEqQW0BExP3AG/WaRwGT0/Rk4Pii9mui4CGgi6SeedVmZmaNa+1zELtFxKsA6X3X1N4LmF/Urya1mZlZmVTKSWpltEVmR2mcpBmSZtTW1uZclplZ29XaAbGw7tBRel+U2muAPkX9egMLsjYQEZMioioiqnr06JFrsWZmbVlrB8RUYEyaHgNMKWo/JV3NNAxYVncoyszMymPbvDYs6UbgCGAXSTXA+cDFwC2SxgIvA6NT9zuBY4BqYCVwal51mZlZaXILiIg4eROLRmT0DeD0vGoxM7Omq5ST1GZmVmEcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkqKiAkfVTSHEnVksaXux4zs7asYgJCUjvg18DRwD7AyZL2KW9VZmZtV8UEBHAQUB0RL0bEv4GbgFFlrsnMrM3attwFFOkFzC+arwEOrt9J0jhgXJpdIWlOK9S21RPsArxe7joqxgUqdwVWj39H62ne72i/UjpVUkBkfdrYqCF
iEjAp/3LaFkkzIqKq3HWYbYp/R1tfJR1iqgH6FM33BhaUqRYzszavkgLiUWCgpAGSOgAnAVPLXJOZWZtVMYeYImKtpDOAvwHtgN9FxNNlLqst8WE7q3T+HW1litjoML+ZmVlFHWIyM7MK4oAwM7NMDog2TtLvJC2S9FS5azHLIqmPpPskzZb0tKSvlbumtsLnINo4SYcDK4BrIuK95a7HrD5JPYGeEfG4pB2Bx4DjI+KZMpe21fMIoo2LiPuBN8pdh9mmRMSrEfF4ml4OzKbw5AXLmQPCzLYYkvoD+wMPl7eStsEBYWZbBEk7AH8Evh4Rb5a7nrbAAWFmFU9SewrhcH1E3FbuetoKB4SZVTRJAq4CZkfEJeWupy1xQLRxkm4EHgQGSaqRNLbcNZnVcwjwOeBISbPS65hyF9UW+DJXMzPL5BGEmZllckCYmVkmB4SZmWVyQJiZWSYHhJmZZXJAmJVA0oom9J0g6Vt5bd+stTggzMwskwPCbDNJOk7Sw5JmSvpfSbsVLd5P0r2Snpf0paJ1zpb0qKQnJV1QhrLNSuaAMNt8/wCGRcT+wE3AOUXL9gWOBYYD35W0u6QPAwOBg4AhwAHp+zjMKtK25S7AbAvWG7g5faFNB+ClomVTImIVsErSfRRC4VDgw8DM1GcHCoFxf+uVbFY6B4TZ5vslcElETJV0BDChaFn9Z9gEIOCHEfGb1inPrHl8iMls8+0MvJKmx9RbNkpSR0ndgSOAR4G/AV9I32uApF6Sdm2tYs2ayiMIs9J0llRTNH8JhRHDHyS9AjwEDCha/gjwF6Av8P2IWAAskDQYeLDwBGtWAJ8FFuVfvlnT+WmuZmaWyYeYzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMws0/8DZOE0CX06nUcAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] @@ -634,7 +660,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH5tJREFUeJzt3Xu8HVV99/HPl4ggcgkQ0JBEIxpUoBoxIFRbEawiKqAPWBDlIhpRqFotCmpfaC0KbRFLtWAUBBXk5oVUsMrDRUQBPYFwjTxEDCQkEuQSLgFK4vf5Y9axO4fJPnOSs8/eOef7fr32a8+sWTPz2ys5+7fXXNbINhEREQOt1+0AIiKiNyVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoi1Iulzkr67huseJumaNsuvkvT+Mn2wpJ+t4X5uk7T7mqy7hvuzpJeM9Lpl/U9L+uaarl+zvcckbVumz5L0z8O47dMl/eNwbS+GXxLEGCRpgaQnyh//fZK+JWnjbsfVju1zbL9psHp1X2K2d7B91VD2J2lq+bJ+1hBD7ZiSMJ+U9KikRyTNkXSspA3669j+ou33N9zWoPVsb2z7rmGI/Rk/BmwfafsLa7vt6JwkiLHr7bY3BnYCdgY+O7CCKvk/0luOtr0JMBH4BHAgcKkkDedOeikxRvfkj3+Ms30v8BNgR/jzL8sTJP0SWA5sK2kbSbMlPShpvqQPDNjMhpLOL79sb5D0yv4F5Rfu78qy2yW9Y8C6kvQfkpZJ+q2kPevibP0FWhLXKZKWlvVulrSjpJnAwcAnS+/ov0r9BZLeWKbHlcMw/THNkTRlKG0maRdJ10p6WNISSV+V9OwB1faWdJekP0r619ZEK+l9kuZJekjSTyW9cCj7B7D9eOkV7QPsBry1bPvPh/wkbSjpu5IeKLH+RtLzJJ0A/BXw1dJOXy31LekoSXcCd7aUtR7ymiDpstJ2P++Pva7H1d9LkfRy4HRgt7K/h8vyVXp7kj5Q/n89WP6/bdOyzJKOlHRnabevDXdSjGdKghjjypfj3sCNLcXvBWYCmwB3A98DFgHbAPsDXxzwRb4vcCGwBXAu8CNJ65dlv6P6MtoM+DzwXUkTW9Z9DXAXMAE4HviBpC0GCftNwF8D2wHjgb8FHrA9CzgH+JdyaOTtNet+HDiofOZNgfdRJcKhWAn8fYl5N2BP4MMD6rwDmEHVQ9u37AdJ+wGfBt4JbAX8gqp914jte4A+qjYe6FCqdp8CbAkcCTxh+zNlv0eXdjq6ZZ39qP5Ntl/NLg8GvkD12edStfdgMc4r+7627G/8wDqS9gC+BLyLqnd0N3DegGpvo+rtvrLUe/Ng+461kwQxdv2o/JK7Bvg58MWWZWfZvs32CuD5wOuAT9l+0vZc4JtUSaTfHNsX2X4a+DKwIbArgO0LbS+2/Sfb51P9Mt2lZd2lwFdsP12W30H5NdzG01TJ62WAbM+zvaTh534/8Fnbd7hyk+0HGq5L+UxzbF9ne4XtBcDXgdcPqHaS7QfLF/hXqJISwAeBL5WYV1C1+/Q16UW0WEyVnAd6mioxvMT2yhL3I4Ns60sl7idWs/wS21fbfgr4DFWvYEg9sNU4GDjT9g1l28eVbU9tqXOi7YdLm14JTB+G/UYbSRBj1362x9t+oe0PD/hCWNgyvQ3woO1HW8ruBibV1bf9J/63t4GkQyTNLYc4HqY6lDWhZd17veqIkXf3r7s6tq8Avgp8DbhP0ixJmw72gYspVL2aNSZpO0k/lvQHSY9QfclPGFCttQ1bP9MLgX9vaY8HAbFqew7VpLKdgb4D/BQ4T9JiSf/S0rNbnYVNl9t+rOy37b9XQ9tQtVPrth9g1Xb5Q8v0cqCnL6wYDZIgok7rF/ZiYAtJm7SUvQC4t2X+z78gy7H2ycD
i8qv4G8DRwJbl0MKtVF+I/SYNOJb8grLP9gHap9p+NbAD1aGmY2pir7MQePFg2x/EacBvgWm2N6U6ZDTweHjrr+rWz7QQ+GBJzv2v59j+1ZoEUn69v5rqkNEqSq/s87a3B/6S6hDNIf2LV7PJwdqv9d96Y6qey2Lg8VK8UUvd5w9hu4upkmf/tp9L1fu5d7VrRMclQURbthcCvwK+VE56vgI4glWPPb9a0jvLCcqPAU8B1wHPpfpiuB9A0uGUk+EttgY+Iml9SQcALwcubReTpJ0lvab8Gn4ceJLqvADAfcC2bVb/JvAFSdPKye5XSNqyTf0Nyufuf61HdXjrEeAxSS8DPlSz3jGSNi9f4B8Fzi/lpwPHSdqhfJbNyuceEkkbSXo9cDHwa2raTNIbJP2FpHEl3qdp3k6rs7ek15WT8l8Arre90Pb9VF/m71F1IcD7WDUR3wdMrjmZ3+9c4HBJ01VdtvvFsu0FaxBjDJMkiGjiIGAq1a+8HwLH276sZfnFVCeKH6I6N/HO8uv1duBk4FqqL4i/AH45YNvXA9OAPwInAPs3OCewKVXP5CGqwxIPAP9Wlp0BbF8O4fyoZt0vAxcAP6P60jwDeE6bfT0GPNHy2gP4B+DdwKMljvNr1rsYmEN1IveSsh9s/xA4ieqwzyNUPaq3DPJ5W31V0qNU7fkV4PvAXuXQ3kDPBy4qn3Me1bmm/psa/x3Yv1wRdOoQ9n8u1cUED1L1XA5uWfYBqp7cA1Q9u9Ze0RXAbcAfJP1x4EZtXw78Y/k8S6iSy4FDiCs6QHlgUERE1EkPIiIiaiVBRERErSSIiIio1fEEUa5ouFHSj8v8iyRdX26ZP7//qgZJG5T5+WX51E7HFhERqzcSA3J9lOoKiv4bmU4CTrF9nqTTqS6ZPK28P2T7JZIOLPX+tt2GJ0yY4KlTp3Ys8IiI0WjOnDl/tL3VYPU6ehWTpMnA2VSXL34ceDvVNfHPt71C0m7A52y/WdJPy/S15Xr6PwBbuU2AM2bMcF9fX8fij4gYjSTNsT1jsHqdPsT0FeCTQP812lsCD5cxaKAakqH/VvpJlNv4y/JlpX5ERHRBxxKEpLcBS23PaS2uqeoGy1q3O1NSn6S++++/fxgijYiIOp3sQbwW2EfSAqphe/eg6lGMbxkzfjL/O0bNIso4L2X5ZtQMQGZ7lu0ZtmdstdWgh9AiImINdSxB2D7O9mTbU6lumb/C9sFUw/TuX6odSjUkAcDsMk9ZfkW78w8REdFZ3bgP4lPAxyXNpzrHcEYpPwPYspR/HDi2C7FFREQxIs+dLY9GvKpM38WqD4zpr/MkMORRLSMiojNyJ3VERNRKgoiIiFpJEBERUWtEzkF0yi33LmPqsZes1TYWnPjWYYomImJ0SQ8iIiJqJUFEREStJIiIiKiVBBEREbWSICIiolYSRERE1EqCiIiIWkkQERFRKwkiIiJqJUFEREStJIiIiKiVBBEREbWSICIiolYSRERE1OpYgpC0oaRfS7pJ0m2SPl/Kz5L0e0lzy2t6KZekUyXNl3SzpJ06FVtERAyuk8+DeArYw/ZjktYHrpH0k7LsGNsXDaj/FmBaeb0GOK28R0REF3SsB+HKY2V2/fJym1X2Bb5d1rsOGC9pYqfii4iI9jp6DkLSOElzgaXAZbavL4tOKIeRTpG0QSmbBCxsWX1RKYuIiC7oaIKwvdL2dGAysIukHYHjgJcBOwNbAJ8q1VW3iYEFkmZK6pPUt3L5sg5FHhERI3IVk+2HgauAvWwvKYeRngK+BexSqi0CprSsNhlYXLOtWbZn2J4xbqPNOhx5RMTY1cmrmLaSNL5MPwd4I/Db/vMKkgTsB9xaVpkNHFKuZtoVWGZ7Safii4iI9jp5FdNE4GxJ46gS0QW2fyzpCklbUR1SmgscWepfCuwNzAeWA4d3MLaIiBhExxKE7ZuBV9WU77Ga+gaO6lQ8ERExNLmTOiIiaiVBRERErSSIiIiolQQRERG1kiA
iIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErY4lCEkbSvq1pJsk3Sbp86X8RZKul3SnpPMlPbuUb1Dm55flUzsVW0REDK6TPYingD1svxKYDuwlaVfgJOAU29OAh4AjSv0jgIdsvwQ4pdSLiIgu6ViCcOWxMrt+eRnYA7iolJ8N7Fem9y3zlOV7SlKn4ouIiPY6eg5C0jhJc4GlwGXA74CHba8oVRYBk8r0JGAhQFm+DNiyZpszJfVJ6lu5fFknw4+IGNM6miBsr7Q9HZgM7AK8vK5aea/rLfgZBfYs2zNszxi30WbDF2xERKxiRK5isv0wcBWwKzBe0rPKosnA4jK9CJgCUJZvBjw4EvFFRMQzdfIqpq0kjS/TzwHeCMwDrgT2L9UOBS4u07PLPGX5Fbaf0YOIiIiR8azBq6yxicDZksZRJaILbP9Y0u3AeZL+GbgROKPUPwP4jqT5VD2HAzsYW0REDKJjCcL2zcCrasrvojofMbD8SeCATsUTERFDkzupIyKiVhJERETUSoKIiIhaSRAREVErCSIiImolQURERK0kiIiIqJUEERERtZIgIiKiVhJERETUSoKIiIhaSRAREVErCSIiImolQURERK0kiIiIqDVogpD0XEnrlentJO0jaf3OhxYREd3UpAdxNbChpEnA5cDhwFmdDCoiIrqvSYKQ7eXAO4H/sP0OYPvOhhUREd3WKEFI2g04GLiklA36qFJJUyRdKWmepNskfbSUf07SvZLmltfeLescJ2m+pDskvXlNPlBERAyPJs+k/ihwHPBD27dJ2ha4ssF6K4BP2L5B0ibAHEmXlWWn2P631sqStgcOBHYAtgH+r6TtbK9s+mEiImL4DJogbF9NdR6if/4u4CMN1lsCLCnTj0qaB0xqs8q+wHm2nwJ+L2k+sAtw7WD7ioiI4dfkKqbtJM2S9DNJV/S/hrITSVOBVwHXl6KjJd0s6UxJm5eyScDCltUWUZNQJM2U1Cepb+XyZUMJIyIihqDJIaYLgdOBbwJDPtwjaWPg+8DHbD8i6TTgC4DL+8nA+wDVrO5nFNizgFkAG0yc9ozlERExPJokiBW2T1uTjZf7Jb4PnGP7BwC272tZ/g3gx2V2ETClZfXJwOI12W9ERKy9Jlcx/ZekD0uaKGmL/tdgK0kScAYwz/aXW8ontlR7B3BrmZ4NHChpA0kvAqYBv278SSIiYlg16UEcWt6PaSkzsO0g670WeC9wi6S5pezTwEGSppdtLAA+CFCukLoAuJ3qCqijcgVTRET3NLmK6UVrsmHb11B/XuHSNuucAJywJvuLiIjh1eSGt/WBDwF/XYquAr5u++kOxhUREV3W5BDTacD6wH+W+feWsvd3KqiIiOi+JgliZ9uvbJm/QtJNnQooIiJ6Q5OrmFZKenH/TBlqIyePIyJGuSY9iGOAKyXdRXXS+YVUQ35HRMQo1uQqpsslTQNeSpUgflvGS4qIiFFstQlC0h62r5D0zgGLXiyJ/jujIyJidGrXg3g9cAXw9pplBpIgIiJGsdUmCNvHl8l/sv371mVlKIyIiBjFmlzF9P2asouGO5CIiOgt7c5BvIzq6W6bDTgPsSmwYacDi4iI7mp3DuKlwNuA8ax6HuJR4AOdDCoiIrqv3TmIi4GLJe1mO4/9jIgYY5qcgzhS0vj+GUmbSzqzgzFFREQPaJIgXmH74f4Z2w9RPV86IiJGsSYJYj1Jm/fPlKfJNRmiIyIi1mFNvuhPBn4lqf/S1gPIQ30iIka9JmMxfVtSH7AH1VhM77R9e8cji4iIrlrtISZJm5b3LYA/AOcC5wB/KGVtSZoi6UpJ8yTdJumj/duTdJmkO8v75qVckk6VNF/SzZJ2Go4PGBERa6bdOYhzy/scoK/l1T8/mBXAJ2y/HNgVOErS9sCxwOW2pwGXl3mAtwDTymsm1VP
rIiKiS9rdB/G28r5G4y7ZXgIsKdOPSpoHTAL2BXYv1c6mesb1p0r5t20buE7SeEkTy3YiImKEtRtqo+0hHts3NN2JpKlUl8ZeDzyv/0vf9hJJW5dqk4CFLastKmWrJAhJM6l6GIzbdKumIURExBC1O0l9cnnfEJgB3ER1kvoVVF/0r2uyA0kbUw349zHbj0habdWaMj+jwJ4FzALYYOK0ZyyPiIjhsdpzELbfYPsNwN3ATrZn2H41VU9gfpONS1qfKjmc0/KAofskTSzLJwJLS/kiYErL6pOBxUP5MBERMXya3Cj3Mtu39M/YvhWYPthKqroKZwDzbH+5ZdFs4NAyfShwcUv5IeVqpl2BZTn/EBHRPU1ulJsn6ZvAd6kO+bwHmNdgvdcC7wVukTS3lH0aOBG4QNIRwD1UN94BXArsTdU7WQ4c3vRDRETE8GuSIA4HPgR8tMxfTYNLUG1fQ/15BYA9a+obOKpBPBERMQKa3En9pKTTgUtt3zECMUVERA8Y9ByEpH2AucB/l/npkmZ3OrCIiOiuJiepjwd2AR4GsD0XmNrBmCIiogc0SRArbC/reCQREdFTmpykvlXSu4FxkqYBHwF+1dmwIiKi25r0IP4O2AF4imoAv2XAxzoZVEREdF/bHoSkccDnbR8DfGZkQoqIiF7QtgdheyXw6hGKJSIiekiTcxA3lstaLwQe7y9sGVspIiJGoSYJYgvgAapHjvYzkAQRETGKNUkQx9j+Y8cjiYiIntLumdRvl3Q/cLOkRZL+cgTjioiILmt3kvoE4K9sbwP8H+BLIxNSRET0gnYJYoXt3wLYvh7YZGRCioiIXtDuHMTWkj6+uvkBDwGKiIhRpl2C+Aar9hoGzkdExCi22gRh+/MjGUhERPSWJmMxRUTEGNSxBCHpTElLJd3aUvY5SfdKmltee7csO07SfEl3SHpzp+KKiIhmOtmDOAvYq6b8FNvTy+tSAEnbAwdSjRq7F/CfZaDAiIjokiaPHP1sy/QGTTds+2rgwYbV9wXOs/2U7d8D86meYhcREV3S7k7qT0raDdi/pfjaYdjn0ZJuLoegNi9lk4CFLXUWlbK6uGZK6pPUt3J5HnQXEdEp7XoQdwAHANtK+oWkWcCWkl66Fvs7DXgxMB1YApxcylVT13UbsD3L9gzbM8ZttNlahBIREe20SxAPAZ+mOtyzO3BqKT9W0ho9ctT2fbZX2v4T1X0V/YeRFgFTWqpOBhavyT4iImJ4tEsQewGXUP3i/zLVl/njtg+3vUYD90ma2DL7DqD/CqfZwIGSNpD0ImAa8Os12UdERAyPdjfKfRpA0k3Ad4FXAVtJugZ4yPbb221Y0veoeh4TJC0Cjgd2lzSd6vDRAuCDZV+3SboAuB1YARxVnmYXERFd0uR5ED+1/RvgN5I+ZPt1kiYMtpLtg2qKz2hT/wSqEWQjIqIHDHqZq+1PtsweVsryAKGIiFFuSDfK2b6pU4FERERvyVhMERFRKwkiIiJqJUFEREStJIiIiKiVBBEREbWSICIiolYSRERE1EqCiIiIWk2G2hjVph57yVqtv+DEtw5TJBERvSU9iIiIqJUEERERtZIgIiKiVhJERETUSoKIiIhaSRAREVErCSIiImp1LEFIOlPSUkm3tpRtIekySXeW981LuSSdKmm+pJsl7dSpuCIioplO9iDOAvYaUHYscLntacDlZR7gLcC08poJnNbBuCIiooGOJQjbVwMPDijeFzi7TJ8N7NdS/m1XrgPGS5rYqdgiImJwI30O4nm2lwCU961L+SRgYUu9RaXsGSTNlNQnqW/l8mUdDTYiYizrlZPUqilzXUXbs2zPsD1j3EabdTisiIixa6QTxH39h47K+9JSvgiY0lJvMrB4hGOLiIgWI50gZgOHlulDgYtbyg8pVzPtCizrPxQVERHd0bHhviV9D9gdmCBpEXA8cCJwgaQjgHuAA0r1S4G9gfnAcuDwTsUVERHNdCxB2D5oNYv2rKlr4Kh
OxRIREUPXKyepIyKixyRBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFoduw9irJh67CVrtf6CE986TJFERAyv9CAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJW7qTustyJHRG9qisJQtIC4FFgJbDC9gxJWwDnA1OBBcC7bD/UjfgiIrqll340dvMQ0xtsT7c9o8wfC1xuexpweZmPiIgu6aVzEPsCZ5fps4H9uhhLRMSY160EYeBnkuZImlnKnmd7CUB537puRUkzJfVJ6lu5fNkIhRsRMfZ06yT1a20vlrQ1cJmk3zZd0fYsYBbABhOnuVMBRkSMdV3pQdheXN6XAj8EdgHukzQRoLwv7UZsERFRGfEEIem5kjbpnwbeBNwKzAYOLdUOBS4e6dgiIuJ/deMQ0/OAH0rq3/+5tv9b0m+ACyQdAdwDHNCF2CIiohjxBGH7LuCVNeUPAHuOdDwREVEvd1JHRAyTtb3Jrdf00n0QERHRQ5IgIiKiVhJERETUSoKIiIhaSRAREVErCSIiImrlMtfouuG4NDAPThoduv0shNF2meraSoJYx+XLdXh0+4spohclQUS+HCOiVhJERAyLXjg80wsxjCZJELHWeuGPshdiWBs5VBi9KFcxRURErfQgIgJY93thMfzSg4iIiFpJEBERUSuHmCKGQQ7PxGiUHkRERNTquR6EpL2AfwfGAd+0fWKXQ4pYJ6QXE8Otp3oQksYBXwPeAmwPHCRp++5GFRExNvVUggB2Aebbvsv2/wDnAft2OaaIiDGp1w4xTQIWtswvAl7TWkHSTGBmmX3q7pPedusIxdbLJgB/7HYQXZY2SBv0G9PtoJOAwdvghU221WsJQjVlXmXGngXMApDUZ3vGSATWy9IOaQNIG/RLOwxfG/TaIaZFwJSW+cnA4i7FEhExpvVagvgNME3SiyQ9GzgQmN3lmCIixqSeOsRke4Wko4GfUl3meqbt29qsMmtkIut5aYe0AaQN+qUdhqkNZHvwWhERMeb02iGmiIjoEUkQERFRa51IEJL2knSHpPmSjq1ZvoGk88vy6yVNHfkoO6tBG3xc0u2SbpZ0uaRG1zmvawZrh5Z6+0uypFF3uWOTNpD0rvL/4TZJ5450jJ3W4O/hBZKulHRj+ZvYuxtxdpKkMyUtlVR7L5gqp5Y2ulnSTkPeie2eflGdrP4dsC3wbOAmYPsBdT4MnF6mDwTO73bcXWiDNwAblekPjbY2aNoOpd4mwNXAdcCMbsfdhf8L04Abgc3L/NbdjrsLbTAL+FCZ3h5Y0O24O9AOfw3sBNy6muV7Az+hur9sV+D6oe5jXehBNBl+Y1/g7DJ9EbCnpLqb7tZVg7aB7SttLy+z11HdQzLaNB2K5QvAvwBPjmRwI6RJG3wA+JrthwBsLx3hGDutSRsY2LRMb8YovJ/K9tXAg22q7At825XrgPGSJg5lH+tCgqgbfmPS6urYXgEsA7YckehGRpM2aHUE1S+H0WbQdpD0KmCK7R+PZGAjqMn/he2A7ST9UtJ1ZYTk0aRJG3wOeI+kRcClwN+NTGg9ZajfG8/QU/dBrMagw280rLMua/z5JL0HmAG8vqMRdUfbdpC0HnAKcNhIBdQFTf4vPIvqMNPuVD3JX0ja0fbDHY5tpDRpg4OAs2yfLGk34DulDf7U+fB6xlp/L64LPYgmw2/8uY6kZ1F1Kdt1vdY1jYYgkfRG4DPAPrafGqHYRtJg7bAJsCNwlaQFVMddZ4+yE9VN/x4utv207d8Dd1AljNGiSRscAVwAYPtaYEOqAezGkrUeumhdSBBNht+YDRxapvcHrnA5SzNKDNoG5dDK16mSw2g75tyvbTvYXmZ7gu2ptqdSnYvZx3Zfd8LtiCZ/Dz+iumgBSROoDjndNaJRdlaTNrgH2BNA0supEsT9Ixpl980GDilXM+0KLLO9ZCgb6PlDTF7N8BuS/gnosz0
bOIOqCzmfqudwYPciHn4N2+BfgY2BC8v5+Xts79O1oDugYTuMag3b4KfAmyTdDqwEjrH9QPeiHl4N2+ATwDck/T3VYZXDRtmPRiR9j+ow4oRyruV4YH0A26dTnXvZG5gPLAcOH/I+RlmbRUTEMFkXDjFFREQXJEFEREStJIiIiKiVBBEREbWSICIiolYSRIw5klZKmivpVkkXStpoiOs/NsT6Z0nav6Z8hqRTy/Rhkr5apo+UdEhL+TZD2V/EcEmCiLHoCdvTbe8I/A9wZOvCcmNRx/82bPfZ/khN+em2v11mDwOSIKIrkiBirPsF8BJJUyXNk/SfwA3AFEkHSbql9DROal1J0smSbijP3tiqlH1A0m8k3STp+wN6Jm+U9AtJ/0/S20r93SU9Y1BBSZ+T9A+l1zEDOKf0eN4q6Yct9f5G0g+Gv0kiKkkQMWaVcbveAtxSil5KNTzyq4CngZOAPYDpwM6S9iv1ngvcYHsn4OdUd7AC/MD2zrZfCcyjGg+o31SqARTfCpwuacPB4rN9EdAHHGx7OtWdsS/vT0hUd8Z+a8gfPKKhJIgYi54jaS7Vl+89VEO1ANxdxs0H2Bm4yvb9ZQj5c6ge0ALwJ+D8Mv1d4HVlesfSS7gFOBjYoWWfF9j+k+07qcZFetlQgy5DRXyHahjr8cBujM5h3aNH9PxYTBEd8ET5Rf5nZfyqx1uLhrC9/vFqzgL2s32TpMOoxskZWGd18019C/gvqochXViSV0RHpAcRUe964PWSJkgaR/V8gZ+XZetRjRoM8G7gmjK9CbBE0vpUPYhWB0haT9KLqR6VeUfDOB4t2wXA9mKqIZs/S5WQIjomPYiIGraXSDoOuJKqN3Gp7YvL4seBHSTNoXp64d+W8n+kSix3U53X2KRlk3dQJZjnAUfafrLhU3HPojpn8QSwm+0nqA53bWX79rX4iBGDymiuEeuYcr/EjbbPGLRyxFpIgohYh5Rey+PA34zSpwZGD0mCiIiIWjlJHRERtZIgIiKiVhJERETUSoKIiIhaSRAREVHr/wM/sus/l8NcPwAAAABJRU5ErkJggg==\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAHpRJREFUeJzt3XmYXFWd//H3h4AgsgQIaAjRiAYRGIkYEMYNwVHAJeiAgiiLaERl1FFRcHnUQRScQZRRwQgILiiLC1FxlB+LiAqaQFgjPyICCYkgW1gCDAmf+eOe1qK53V2d9K2qdH9ez9NP33vuube+ddKpb51z7iLbRERE9LdGtwOIiIjelAQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIlaJpM9I+u5K7nuwpMsG2X6JpHeW5QMk/WolX+d6SbuuzL4r+XqW9NxO71v2/7ikU1Z2/5rjPShpy7J8uqTPjeCxT5b0qZE6Xoy8JIgxSNItkh4u//nvkPQtSet1O67B2P6e7VcPVa/uQ8z2trYvGc7rSZpSPqzXHGaojSkJ8xFJD0i6X9JcSUdKWruvju3P235nm8casp7t9WzfPAKxP+nLgO3DbB+9qseO5iRBjF2vt70esAOwI/DJ/hVUyd9Ibznc9vrARODDwH7A+ZI0ki/SS4kxuif/+cc427cDvwC2g79/szxG0m+BZcCWkjaXNFvSPZIWSHpXv8OsI+ms8s32Sknb920o33D/XLbdIOmN/faVpP+WtFTSnyTtXhdn6zfQkrhOkHRn2e8aSdtJmgkcAHy09I5+WurfIulVZXlcGYbpi2mupMnDaTNJO0n6vaT7JC2R9FVJT+lXbS9JN0u6S9J/tiZaSe+QNF/SvZJ+KelZw3l9ANsPlV7RG4BdgNeWY/99yE/SOpK+K+nuEusfJT1d0jHAy4Cvlnb6aqlvSe+TdBNwU0tZ65DXBEkXlLb7dV/sdT2uvl6KpOcDJwO7lNe7r2x/Qm9P0rvK39c95e9t85ZtlnSYpJtKu31tpJNiPFkSxBhXPhz3Aq5qKX47MBNYH7gV+D6wCNgc2Af4fL8P8hnAOcDGwJnATyStVbb9merDaEPgs8B3JU1s2ffFwM3ABODTwI8kbTxE2K8GXg5sBYwH3gLcbXsW8D3gi2Vo5PU1+34I2L+85w2Ad1AlwuFYAfx7iXkXYHfgvf3qvBGYTtVDm1FeB0l7Ax8H3gRsCvyGqn1Xiu3bgDlUbdzfQVTtPhnYBDgMeNj2J8rrHl7a6fCWffam+jfZZoCXPAA4muq9z6Nq76FinF9e+/fl9cb3ryNpN+ALwJupeke3Aj/oV+11VL3d7Uu91wz12rFqkiDGrp+Ub3KXAb8GPt+y7XTb19teDjwDeCnwMduP2J4HnEKVRPrMtX2u7ceALwHrADsD2D7H9mLbj9s+i+qb6U4t+94JfNn2Y2X7jZRvw4N4jCp5bQ3I9nzbS9p83+8EPmn7Rleutn13m/tS3tNc25fbXm77FuAbwCv6VTvO9j3lA/zLVEkJ4N3AF0rMy6nafdrK9CJaLKZKzv09RpUYnmt7RYn7/iGO9YUS98MDbP+57UttPwp8gqpXMKwe2AAOAE6zfWU59lHl2FNa6hxr+77SphcD00bgdWMQSRBj1962x9t+lu339vtAWNiyvDlwj+0HWspuBSbV1bf9OP/obSDpQEnzyhDHfVRDWRNa9r3dT7xj5K19+w7E9kXAV4GvAXdImiVpg6HecDGZqlez0iRtJelnkv4q6X6qD/kJ/aq1tmHre3oW8JWW9rgHEE9sz+GaVI7T33eAXwI/kLRY0hdbenYDWdjudtsPltcd9N+rTZtTtVPrse/mie3y15blZUBPn1gxGiRBRJ3WD+zFwMaS1m8peyZwe8v6379BlrH2LYDF5VvxN4HDgU3K0MJ1VB+IfSb1G0t+ZnnNwQO0T7T9ImBbqqGmI2pir7MQeM5Qxx/CScC
fgKm2N6AaMuo/Ht76rbr1PS0E3l2Sc9/PU23/bmUCKd/eX0Q1ZPQEpVf2WdvbAP9MNURzYN/mAQ45VPu1/luvR9VzWQw8VIrXban7jGEcdzFV8uw79tOoej+3D7hHNC4JIgZleyHwO+ALZdLzBcChPHHs+UWS3lQmKD8IPApcDjyN6oPhbwCSDqFMhrfYDHi/pLUk7Qs8Hzh/sJgk7SjpxeXb8EPAI1TzAgB3AFsOsvspwNGSppbJ7hdI2mSQ+muX9933swbV8Nb9wIOStgbeU7PfEZI2Kh/gHwDOKuUnA0dJ2ra8lw3L+x4WSetKegVwHvAHatpM0isl/ZOkcSXex2i/nQayl6SXlkn5o4ErbC+0/TeqD/O3qToR4B08MRHfAWxRM5nf50zgEEnTVJ22+/ly7FtWIsYYIUkQ0Y79gSlU3/J+DHza9gUt28+jmii+l2pu4k3l2+sNwPHA76k+IP4J+G2/Y18BTAXuAo4B9mljTmADqp7JvVTDEncD/1W2nQpsU4ZwflKz75eAs4FfUX1ongo8dZDXehB4uOVnN+AjwFuBB0ocZ9Xsdx4wl2oi9+fldbD9Y+A4qmGf+6l6VHsO8X5bfVXSA1Tt+WXgh8AeZWivv2cA55b3OZ9qrqnvosavAPuUM4JOHMbrn0l1MsE9VD2XA1q2vYuqJ3c3Vc+utVd0EXA98FdJd/U/qO0LgU+V97OEKrnsN4y4ogHKA4MiIqJOehAREVErCSIiImolQURERK0kiIiIqLVa35BrwoQJnjJlSrfDiIhYrcydO/cu25sOVW+1ThBTpkxhzpw53Q4jImK1IunWoWtliCkiIgaQBBEREbWSICIiolYSRERE1EqCiIiIWkkQERFRKwkiIiJqJUFEREStJIiIiKi1Wl9Jfe3tS5ly5M9X6Ri3HPvaEYomImJ0SQ8iIiJqNZYgyvN7/yDpaknXS/psKT9d0l8kzSs/00q5JJ0oaYGkayTt0FRsERExtCaHmB4FdrP9YHm4/GWSflG2HWH73H7196R6NvFU4MXASeV3RER0QWM9CFceLKtrlZ/BHoA9A/h22e9yYLykiU3FFxERg2t0DkLSOEnzgDuBC2xfUTYdU4aRTpC0dimbBCxs2X1RKet/zJmS5kias2LZ0ibDj4gY0xpNELZX2J4GbAHsJGk74Chga2BHYGPgY6W66g5Rc8xZtqfbnj5u3Q0bijwiIjpyFpPt+4BLgD1sLynDSI8C3wJ2KtUWAZNbdtsCWNyJ+CIi4smaPItpU0njy/JTgVcBf+qbV5AkYG/gurLLbODAcjbTzsBS20uaii8iIgbX5FlME4EzJI2jSkRn2/6ZpIskbUo1pDQPOKzUPx/YC1gALAMOaTC2iIgYQmMJwvY1wAtryncboL6B9zUVT0REDE+upI6IiFpJEBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErcYShKR1JP1B0tWSrpf02VL+bElXSLpJ0lmSnlLK1y7rC8r2KU3FFhERQ2uyB/EosJvt7YFpwB6SdgaOA06wPRW4Fzi01D8UuNf2c4ETSr2IiOiSxhKEKw+W1bXKj4HdgHNL+RnA3mV5RlmnbN9dkpqKLyIiBtfoHISkcZLmAXcCFwB/Bu6zvbxUWQRMKsuTgIUAZftSYJOaY86UNEfSnBXLljYZfkTEmNZogrC9wvY0YAtgJ+D5ddXK77regp9UYM+yPd329HHrbjhywUZExBN05Cwm2/cBlwA7A+MlrVk2bQEsLsuLgMkAZfuGwD2diC8iIp6sybOYNpU0viw/FXgVMB+4GNinVDsIOK8szy7rlO0X2X5SDyIiIjpjzaGrrLSJwBmSxlElorNt/0zSDcAPJH0OuAo4tdQ/FfiOpAVUPYf9GowtIiKG0Fi
CsH0N8MKa8pup5iP6lz8C7NtUPBERMTy5kjoiImolQURERK0kiIiIqJUEERERtZIgIiKiVhJERETUSoKIiIhaSRAREVErCSIiImolQURERK0kiIiIqJUEERERtZIgIiKiVhJERETUSoKIiIhaSRAREVErCSIiImolQURERK0kiIiIqJUEERERtRpLEJImS7pY0nxJ10v6QCn/jKTbJc0rP3u17HOUpAWSbpT0mqZii4iIoa3Z4LGXAx+2faWk9YG5ki4o206w/V+tlSVtA+wHbAtsDvw/SVvZXtFgjBERMYDGehC2l9i+siw/AMwHJg2yywzgB7Yftf0XYAGwU1PxRUTE4DoyByFpCvBC4IpSdLikaySdJmmjUjYJWNiy2yJqEoqkmZLmSJqzYtnSBqOOiBjbGk8QktYDfgh80Pb9wEnAc4BpwBLg+L6qNbv7SQX2LNvTbU8ft+6GDUUdERGNJghJa1Elh+/Z/hGA7Ttsr7D9OPBN/jGMtAiY3LL7FsDiJuOLiIiBDZkgJD1N0hpleStJbygf/EPtJ+BUYL7tL7WUT2yp9kbgurI8G9hP0tqSng1MBf7Q/luJiIiR1M5ZTJcCLytzBRcCc4C3AAcMsd9LgLcD10qaV8o+DuwvaRrV8NEtwLsBbF8v6WzgBqozoN6XM5giIrqnnQQh28skHQr8t+0vSrpqqJ1sX0b9vML5g+xzDHBMGzFFRETD2pmDkKRdqHoMPy9lTV4/ERERPaCdBPEB4Cjgx2UYaEvg4mbDioiIbhuyJ2D7Uqp5iL71m4H3NxlURER035AJQtJWwEeAKa31be/WXFgREdFt7cwlnAOcDJwC5KyiiIgxop0Esdz2SY1HEhERPaWdSeqfSnqvpImSNu77aTyyiIjoqnZ6EAeV30e0lBnYcuTDiYiIXtHOWUzP7kQgERHRW9o5i2kt4D3Ay0vRJcA3bD/WYFwREdFl7QwxnQSsBXy9rL+9lL2zqaAiIqL72kkQO9revmX9IklXNxVQRET0hnbOYloh6Tl9K+VWG7keIiJilGunB3EEcLGkm6nuzvos4JBGo4qIiK5r5yymCyVNBZ5HlSD+ZPvRxiOLiIiuGjBBSNrN9kWS3tRv03Mk0fcI0YiIGJ0G60G8ArgIeH3NNgNJEBERo9iACcL2p8vif9j+S+u28szoiIgYxdo5i+mHNWXnjnQgERHRWwabg9ga2BbYsN88xAbAOk0HFhER3TVYD+J5wOuA8VTzEH0/OwDvGurAkiZLuljSfEnXS/pAKd9Y0gWSbiq/NyrlknSipAWSrpG0w6q+uYiIWHmDzUGcB5wnaRfbv1+JYy8HPmz7SknrA3MlXQAcDFxo+1hJRwJHAh8D9gSmlp8XU93O48Ur8boRETEC2pmDOEzS+L4VSRtJOm2onWwvsX1lWX4AmA9MAmYAZ5RqZwB7l+UZwLdduRwYL2li+28lIiJGUjsJ4gW27+tbsX0v8MLhvIikKWWfK4Cn215SjrUE2KxUmwQsbNltUSnrf6yZkuZImrNi2dLhhBEREcPQToJYo2+eAKo5BNq7RUdf/fWozoT6oO37B6taU+YnFdizbE+3PX3cuhu2G0ZERAxTOx/0xwO/k9R3auu+wDHtHLw8S+KHwPdarry+Q9JE20vKENKdpXwRMLll9y2Axe28TkREjLwhexC2vw38K3AH1Yf5m2x/Z6j9JAk4FZhv+0stm2bzj8eYHgSc11J+YDmbaWdgad9QVEREdN5g10FsYPv+MqT0V+DMlm0b275niGO/hOrhQtdKmlfKPg4cC5wt6VDgNqoeCcD5wF7AAmAZuWNsRERXDTbEdCbVdRBzeeJcgMr6loMd2PZl1M8rAOxeU9/A+wY7ZkREdM5g10G8rvzOfZciIsagwYaYBr2Sue8ah4iIGJ0GG2I6vvxeB5gOXE01ZPQCqusZXtpsaBER0U0DnsVk+5W2XwncCuxQrj14EdUFbws6FWBERHRHOxfKbW372r4V29c
B05oLKSIiekE7F8rNl3QK8F2qs5feRnVfpYiIGMXaSRCHAO8BPlDWL6W602pERIxiQyYI249IOhk43/aNHYgpIiJ6wJBzEJLeAMwD/qesT5M0u+nAIiKiu9qZpP40sBNwH4DtecCUBmOKiIge0E6CWG47D16IiBhj2pmkvk7SW4FxkqYC7wd+12xYERHRbe30IP4N2BZ4lOoGfkuBDzYZVEREdN+gPQhJ44DP2j4C+ERnQoqIiF4waA/C9grgRR2KJSIiekg7cxBXldNazwEe6itseYRoRESMQu0kiI2Bu4HdWsoMJEFERIxi7SSII2zf1XgkERHRUwacg5D0ekl/A66RtEjSP3cwroiI6LLBJqmPAV5me3PgX4EvDOfAkk6TdKek61rKPiPpdknzys9eLduOkrRA0o2SXjPcNxIRESNrsASx3PafAGxfAaw/zGOfDuxRU36C7Wnl53wASdsA+1Fdb7EH8PVyim1ERHTJYHMQm0n60EDrtr802IFtXyppSptxzAB+YPtR4C+SFlDd/+n3be4fEREjbLAexDepeg19P/3XV9bhkq4pQ1AblbJJwMKWOotKWUREdMmAPQjbn23g9U4CjqY6TfZo4HjgHYDqQqg7gKSZwEyAcRts2kCIEREB7d2LacTYvsP2CtuPU/VIdiqbFgGTW6puASwe4BizbE+3PX3cuhs2G3BExBjW0QQhaWLL6huBvjOcZgP7SVpb0rOBqcAfOhlbREQ8UTsXyq0USd8HdgUmSFpE9eChXSVNoxo+ugV4N4Dt6yWdDdwALAfeV+4DFRERXTJkgpD0SdufK8trlzONhmR7/5riUwepfwzVtRcREdEDBruS+qOSdgH2aSnOaacREWPEYD2IG4F9gS0l/QaYD2wi6Xm2b+xIdBER0TWDTVLfC3wcWEA1l3BiKT9SUh45GhExyg3Wg9iDamL5OcCXgKuBh2wf0onAIiKiuwbsQdj+uO3dqc42+i5VMtlU0mWSftqh+CIiokvaOc31l7b/CPxR0ntsv1TShKYDi4iI7hryQjnbH21ZPbiU5QFCERGj3LCupLZ9dVOBREREb+norTYiImL1kQQRERG1kiAiIqJWYzfrW11MOfLnq7T/Lce+doQiiYjoLelBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiajWWICSdJulOSde1lG0s6QJJN5XfG5VySTpR0gJJ10jaoam4IiKiPU32IE6neipdqyOBC21PBS4s6wB7AlPLz0zgpAbjioiINjSWIGxfCtzTr3gGcEZZPgPYu6X8265cDoyXNLGp2CIiYmidnoN4uu0lAOX3ZqV8ErCwpd6iUvYkkmZKmiNpzoplSxsNNiJiLOuVSWrVlLmuou1Ztqfbnj5u3Q0bDisiYuzqdIK4o2/oqPy+s5QvAia31NsCWNzh2CIiokWnE8Rs4KCyfBBwXkv5geVspp2BpX1DURER0R2NPQ9C0veBXYEJkhYBnwaOBc6WdChwG7BvqX4+sBewAFgGHNJUXBER0Z7GEoTt/QfYtHtNXQPvayqWiIgYvl6ZpI6IiB6TBBEREbXG/DOpV1WeaR0Ro1V6EBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErSSIiIiolQQRERG1kiAiIqJWEkRERNRKgoiIiFpJEBERUasrT5STdAvwALACWG57uqSNgbOAKcAtwJtt39uN+CIioruPHH2l7bta1o8ELrR9rKQjy/rHuhNa56zqI0shjy2NiGb00jOpZwC7luUzgEsYAwkiIqJVLz3nvltzEAZ+JWmupJml7Om2lwCU35vV7ShppqQ5kuasWLa0Q+FGRIw93epBvMT2YkmbARdI+lO7O9qeBcwCWHviVDcVYETEWNeVHoTtxeX3ncCPgZ2AOyRNBCi/7+xGbBERUel4gpD0NEnr9y0Drwa
uA2YDB5VqBwHndTq2iIj4h24MMT0d+LGkvtc/0/b/SPojcLakQ4HbgH27EFtERBQdTxC2bwa2rym/G9i90/FERES9XEkdERG1kiAiIqJWEkRERNTqpSupIyJWayNx65xekgQxCvTSpfkRMXokQUTEiBgNX1RGWw9gVWUOIiIiaiVBRERErQwxxagwGoY3InpNehAREVErPYjo+sRcvr0H5OmKvSgJIoIMUUXUSYKIrut2DyZGj/wtjazMQURERK30ICJGQIaoYjRKgojoAb0wNJIkFf1liCkiImolQURERK0kiIiIqJUEERERtXpuklrSHsBXgHHAKbaP7XJIEWNCL0yUR2/pqR6EpHHA14A9gW2A/SVt092oIiLGpp5KEMBOwALbN9v+X+AHwIwuxxQRMSb12hDTJGBhy/oi4MWtFSTNBGaW1UdvPe5113Uotl42Abir20F0WdogbdBnTLeDjgOGboNntXOsXksQqinzE1bsWcAsAElzbE/vRGC9LO2QNoC0QZ+0w8i1Qa8NMS0CJresbwEs7lIsERFjWq8liD8CUyU9W9JTgP2A2V2OKSJiTOqpISbbyyUdDvyS6jTX02xfP8guszoTWc9LO6QNIG3QJ+0wQm0g20PXioiIMafXhpgiIqJHJEFERESt1SJBSNpD0o2SFkg6smb72pLOKtuvkDSl81E2q402+JCkGyRdI+lCSW2d57y6GaodWurtI8mSRt3pju20gaQ3l7+H6yWd2ekYm9bG/4dnSrpY0lXl/8Re3YizSZJOk3SnpNprwVQ5sbTRNZJ2GPaL2O7pH6rJ6j8DWwJPAa4GtulX573AyWV5P+CsbsfdhTZ4JbBuWX7PaGuDdtuh1FsfuBS4HJje7bi78LcwFbgK2Kisb9btuLvQBrOA95TlbYBbuh13A+3wcmAH4LoBtu8F/ILq+rKdgSuG+xqrQw+indtvzADOKMvnArtLqrvobnU1ZBvYvtj2srJ6OdU1JKNNu7diORr4IvBIJ4PrkHba4F3A12zfC2D7zg7H2LR22sDABmV5Q0bh9VS2LwXuGaTKDODbrlwOjJc0cTivsTokiLrbb0waqI7t5cBSYJOORNcZ7bRBq0OpvjmMNkO2g6QXApNt/6yTgXVQO38LWwFbSfqtpMvLHZJHk3ba4DPA2yQtAs4H/q0zofWU4X5uPElPXQcxgCFvv9FmndVZ2+9P0tuA6cArGo2oOwZtB0lrACcAB3cqoC5o529hTaphpl2pepK/kbSd7fsajq1T2mmD/YHTbR8vaRfgO6UNHm8+vJ6xyp+Lq0MPop3bb/y9jqQ1qbqUg3W9Vjdt3YJE0quATwBvsP1oh2LrpKHaYX1gO+ASSbdQjbvOHmUT1e3+fzjP9mO2/wLcSJUwRot22uBQ4GwA278H1qG6gd1Yssq3LlodEkQ7t9+YDRxUlvcBLnKZpRklhmyDMrTyDarkMNrGnPsM2g62l9qeYHuK7SlUczFvsD2nO+E2op3/Dz+hOmkBSROohpxu7miUzWqnDW4DdgeQ9HyqBPG3jkbZfbOBA8vZTDsDS20vGc4Ben6IyQPcfkPSfwBzbM8GTqXqQi6g6jns172IR16bbfCfwHrAOWV+/jbbb+ha0A1osx1GtTbb4JfAqyXdAKwAjrB9d/eiHllttsGHgW9K+neqYZWDR9mXRiR9n2oYcUKZa/k0sBaA7ZOp5l72AhYAy4BDhv0ao6zNIiJihKwOQ0wREdEFSRAREVErCSIiImolQURERK0kiIiIqJUEEWOOpBWS5km6TtI5ktYd5v4PDrP+6ZL2qSmfLunEsnywpK+W5cMkHdhSvvlwXi9ipCRBxFj0sO1ptrcD/hc4rHVjubCo8f8btufYfn9N+cm2v11WDwaSIKIrkiBirPsN8FxJUyTNl/R14EpgsqT9JV1behrHte4k6XhJV5Znb2xayt4l6Y+Srpb0w349k1dJ+o2k/y/pdaX+rpKedFNBSZ+R9JHS65gOfK/
0eF4r6cct9f5F0o9GvkkiKkkQMWaV+3btCVxbip5HdXvkFwKPAccBuwHTgB0l7V3qPQ240vYOwK+prmAF+JHtHW1vD8ynuh9QnylUN1B8LXCypHWGis/2ucAc4ADb06iujH1+X0KiujL2W8N+4xFtSoKIseipkuZRffjeRnWrFoBby33zAXYELrH9t3IL+e9RPaAF4HHgrLL8XeClZXm70ku4FjgA2LblNc+2/bjtm6jui7T1cIMut4r4DtVtrMcDuzA6b+sePaLn78UU0YCHyzfyvyv3r3qotWgYx+u7X83pwN62r5Z0MNV9cvrXGWi9Xd8Cfkr1MKRzSvKKaER6EBH1rgBeIWmCpHFUzxf4ddm2BtVdgwHeClxWltcHlkhai6oH0WpfSWtIeg7VozJvbDOOB8pxAbC9mOqWzZ+kSkgRjUkPIqKG7SWSjgIupupNnG/7vLL5IWBbSXOpnl74llL+KarEcivVvMb6LYe8kSrBPB04zPYjbT4V93SqOYuHgV1sP0w13LWp7RtW4S1GDCl3c41YzZTrJa6yfeqQlSNWQRJExGqk9FoeAv5llD41MHpIEkRERNTKJHVERNRKgoiIiFpJEBERUSsJIiIiaiVBRERErf8D8+iRWAcd0gQAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] @@ -669,8 +695,8 @@ "output_type": "stream", "text": [ " y=1 y=2 \n", - " l=1 178 68 \n", - " l=2 53 701 \n" + " l=1 181 65 \n", + " l=2 56 698 \n" ] } ], @@ -737,8 +763,9 @@ ], "source": [ "from metal.end_model import EndModel\n", - "\n", - "end_model = EndModel([1000,10,2], seed=123)" + "import torch\n", + "use_cuda = torch.cuda.is_available()\n", + "end_model = EndModel([1000,10,2], seed=123, use_cuda=use_cuda)" ] }, { @@ -762,19 +789,78 @@ "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 95.07it/s] \n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "Saving model at iteration 0 with best score 0.992\n", - "[E:0]\tTrain Loss: 0.499\tDev score: 0.992\n", - "[E:1]\tTrain Loss: 0.461\tDev score: 0.947\n", - "[E:2]\tTrain Loss: 0.453\tDev score: 0.956\n", - "[E:3]\tTrain Loss: 0.451\tDev score: 0.974\n", - "[E:4]\tTrain Loss: 0.450\tDev score: 0.948\n", + "[E:0]\tTrain Loss: 0.508\tDev score: 0.992\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 99.93it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:1]\tTrain Loss: 0.470\tDev score: 0.928\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 99.20it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:2]\tTrain Loss: 0.465\tDev score: 0.949\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 97.21it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:3]\tTrain Loss: 0.461\tDev score: 0.969\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 96.68it/s] \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:4]\tTrain Loss: 0.460\tDev 
score: 0.954\n", "Restoring best model from iteration 0 with score 0.992\n", "Finished Training\n", - "Confusion Matrix (Dev)\n", + "Accuracy: 0.996\n", " y=1 y=2 \n", " l=1 244 2 \n", " l=2 2 752 \n" @@ -782,7 +868,7 @@ } ], "source": [ - "end_model.train(Xs[0], Y_train_ps, Xs[1], Ys[1], l2=0.1, batch_size=256, \n", + "end_model.train((Xs[0], Y_train_ps), dev_data=(Xs[1], Ys[1]), l2=0.1, batch_size=256, \n", " n_epochs=5, print_every=1, validation_metric='f1')" ] }, @@ -812,25 +898,31 @@ "output_type": "stream", "text": [ "Label Model:\n", - "Precision: 0.757\n", - "Recall: 0.695\n", - "F1: 0.725\n", + "Precision: 0.747\n", + "Recall: 0.707\n", + "F1: 0.727\n", + " y=1 y=2 \n", + " l=1 174 59 \n", + " l=2 72 695 \n", "\n", "End Model:\n", "Precision: 0.996\n", "Recall: 0.984\n", - "F1: 0.990\n" + "F1: 0.990\n", + " y=1 y=2 \n", + " l=1 242 1 \n", + " l=2 4 753 \n" ] } ], "source": [ "print(\"Label Model:\")\n", - "score = label_model.score(Ls[2], Ys[2], metric=['precision', 'recall', 'f1'])\n", + "score = label_model.score((Ls[2], Ys[2]), metric=['precision', 'recall', 'f1'])\n", "\n", "print()\n", "\n", "print(\"End Model:\")\n", - "score = end_model.score(Xs[2], Ys[2], metric=['precision', 'recall', 'f1'])" + "score = end_model.score((Xs[2], Ys[2]), metric=['precision', 'recall', 'f1'])" ] }, { diff --git a/tutorials/Multitask.ipynb b/tutorials/Multitask.ipynb index c754197d..97d01679 100644 --- a/tutorials/Multitask.ipynb +++ b/tutorials/Multitask.ipynb @@ -58,12 +58,11 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import pickle\n", - "\n", "with open(\"data/multitask_tutorial.pkl\", 'rb') as f:\n", " Xs, Ys, Ls, Ds = pickle.load(f)" ] @@ -99,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +122,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, "outputs": 
[], "source": [ @@ -140,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -149,14 +148,14 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 4.034\n", - "[E:20]\tTrain Loss: 0.472\n", - "[E:40]\tTrain Loss: 0.111\n", - "[E:60]\tTrain Loss: 0.050\n", - "[E:80]\tTrain Loss: 0.034\n", - "[E:100]\tTrain Loss: 0.028\n", - "[E:120]\tTrain Loss: 0.027\n", - "[E:140]\tTrain Loss: 0.026\n", + "[E:0]\tTrain Loss: 2.785\n", + "[E:20]\tTrain Loss: 0.451\n", + "[E:40]\tTrain Loss: 0.053\n", + "[E:60]\tTrain Loss: 0.027\n", + "[E:80]\tTrain Loss: 0.026\n", + "[E:100]\tTrain Loss: 0.025\n", + "[E:120]\tTrain Loss: 0.025\n", + "[E:140]\tTrain Loss: 0.025\n", "[E:160]\tTrain Loss: 0.025\n", "[E:180]\tTrain Loss: 0.025\n", "[E:199]\tTrain Loss: 0.025\n", @@ -168,6 +167,31 @@ "label_model.train(Ls[0], n_epochs=200, print_every=20, seed=123)" ] }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[<100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>,\n", + " <100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>,\n", + " <100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Ls[2]" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -177,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 9, "metadata": { "scrolled": false }, @@ -186,27 +210,27 @@ "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.910\n" + "Accuracy: 0.900\n" ] }, { "data": { "text/plain": [ - "0.91" + "0.9" ] }, - "execution_count": 16, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "label_model.score(Ls[1], 
Ys[1])" + "label_model.score((Ls[1], Ys[1]))" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -243,7 +267,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -281,34 +305,97 @@ ], "source": [ "from metal.multitask import MTEndModel\n", - "\n", + "import torch\n", + "use_cuda = torch.cuda.is_available()\n", "end_model = MTEndModel([1000,100,10], task_graph=task_graph, seed=123)" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 12, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 161.90it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 0 with best score 0.940\n", - "[E:0]\tTrain Loss: 2.264\tDev score: 0.940\n", - "[E:1]\tTrain Loss: 1.352\tDev score: 0.917\n", - "[E:2]\tTrain Loss: 1.069\tDev score: 0.900\n", - "[E:3]\tTrain Loss: 0.962\tDev score: 0.853\n", - "[E:4]\tTrain Loss: 0.909\tDev score: 0.890\n", - "Restoring best model from iteration 0 with score 0.940\n", - "Finished Training\n" + "Saving model at iteration 0 with best score 0.833\n", + "[E:0]\tTrain Loss: 2.260\tDev score: 0.833\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 230.25it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving model at iteration 1 with best score 0.930\n", + "[E:1]\tTrain Loss: 1.334\tDev score: 0.930\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 259.97it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving model at iteration 2 with best score 0.937\n", + "[E:2]\tTrain Loss: 1.054\tDev score: 0.937\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + 
"text": [ + "100%|██████████| 25/25 [00:00<00:00, 256.49it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:3]\tTrain Loss: 0.911\tDev score: 0.917\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 258.90it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:4]\tTrain Loss: 0.864\tDev score: 0.903\n", + "Restoring best model from iteration 2 with score 0.937\n", + "Finished Training\n", + "Accuracy: 0.937\n" ] } ], "source": [ - "end_model.train(Xs[0], Y_train_ps, Xs[1], Ys[1], n_epochs=5, seed=123)" + "end_model.train((Xs[0], Y_train_ps), dev_data=(Xs[1], Ys[1]), n_epochs=5, seed=123)" ] }, { @@ -327,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 13, "metadata": { "scrolled": true }, @@ -337,21 +424,21 @@ "output_type": "stream", "text": [ "Label Model:\n", - "Accuracy: 0.880\n", + "Accuracy: 0.850\n", "\n", "End Model:\n", - "Accuracy: 0.917\n" + "Accuracy: 0.927\n" ] } ], "source": [ "print(\"Label Model:\")\n", - "score = label_model.score(Ls[2], Ys[2])\n", + "score = label_model.score((Ls[2], Ys[2]))\n", "\n", "print()\n", "\n", "print(\"End Model:\")\n", - "score = end_model.score(Xs[2], Ys[2])" + "score = end_model.score((Xs[2], Ys[2]))" ] }, { @@ -363,21 +450,21 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy (t=0): 0.940\n", - "Accuracy (t=1): 0.910\n", - "Accuracy (t=2): 0.900\n" + "Accuracy (t=0): 0.930\n", + "Accuracy (t=1): 0.920\n", + "Accuracy (t=2): 0.930\n" ] } ], "source": [ - "scores = end_model.score(Xs[2], Ys[2], reduce=None)" + "scores = end_model.score((Xs[2], Ys[2]), reduce=None)" ] }, { @@ -389,36 +476,36 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - 
"[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1,\n", + "[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2,\n", " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 2, 2,\n", - " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,\n", - " 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", - " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 1,\n", - " 3, 3, 3, 3, 2, 3, 1, 2, 2, 3, 1, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", - " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 2, 1, 3, 1, 1, 1, 1, 3, 3,\n", - " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", - " 3, 1, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", - " array([1, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 2, 1, 2, 2, 1, 1, 3, 2, 3, 3,\n", - " 2, 1, 2, 2, 3, 2, 3, 3, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 1, 3,\n", - " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 3, 3, 3, 1, 1, 1,\n", - " 3, 1, 3, 3, 3, 2, 3, 2, 3, 2, 2, 3, 3, 1, 3, 3, 3, 1, 2, 1, 2, 1,\n", + " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2,\n", + " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2,\n", + " 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", + " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3,\n", + " 3, 3, 3, 3, 2, 3, 1, 2, 2, 3, 2, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", + " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 3, 1, 2, 3, 3, 3,\n", + " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 3, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", + " 3, 3, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", + " array([1, 3, 3, 1, 3, 3, 3, 3, 3, 2, 1, 1, 2, 1, 2, 2, 1, 1, 3, 3, 3, 1,\n", + " 2, 1, 2, 1, 3, 2, 3, 3, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3,\n", + " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 1, 3, 3, 1, 1, 1,\n", + " 3, 1, 3, 3, 1, 2, 3, 2, 3, 2, 2, 1, 3, 1, 3, 3, 3, 1, 2, 1, 2, 1,\n", " 2, 3, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" ] }, - "execution_count": 22, + 
"execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "Y_p = end_model.predict(Xs[2], Ys[2])\n", + "Y_p = end_model.predict(Xs[2])\n", "Y_p" ] },