From 2153b73353c1b950b734086ee606e5b54056fd9d Mon Sep 17 00:00:00 2001 From: jdunnmon Date: Tue, 28 Aug 2018 16:42:38 -0700 Subject: [PATCH 01/35] fixed nltk typo, fixed featurizer init in contrib, added gpu support for end model --- .pre-commit-config.yaml | 19 ----------------- environment.yml | 3 ++- metal/classifier.py | 21 ++++++++++++++++++- .../featurizers/embedding_featurizer.py | 2 +- metal/contrib/featurizers/requirements.txt | 4 ++-- metal/end_model/end_model.py | 10 +++++++-- metal/end_model/loss.py | 7 ++++++- metal/label_model/lm_defaults.py | 2 ++ 8 files changed, 41 insertions(+), 27 deletions(-) delete mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 68335bc4..00000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -repos: -- repo: https://github.com/asottile/seed-isort-config - rev: master - hooks: - - id: seed-isort-config -- repo: https://github.com/pre-commit/mirrors-isort - rev: master - hooks: - - id: isort -- repo: https://github.com/ambv/black - rev: stable - hooks: - - id: black - args: [--line-length, '80'] - language_version: python3.6 -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: master - hooks: - - id: flake8 \ No newline at end of file diff --git a/environment.yml b/environment.yml index 99e757b5..67b14911 100644 --- a/environment.yml +++ b/environment.yml @@ -13,4 +13,5 @@ dependencies: - pandas - pytorch=0.4.1 - runipy - - scipy \ No newline at end of file + - scipy + - tqdm diff --git a/metal/classifier.py b/metal/classifier.py index 393a6dfe..cd850f24 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn import torch.optim as optim +from tqdm import tqdm from metal.analysis import confusion_matrix from metal.metrics import metric_score @@ -141,11 +142,29 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): checkpointer = Checkpointer( model_class, **checkpoint_config, verbose=self.config["verbose"] ) + + # Moving model and dev data to GPU + if train_config['use_cuda']: + if evaluate_dev: + X_dev = X_dev.cuda() + Y_dev = Y_dev.cuda() + + if self.config['verbose']: + print('Using GPU...') + + self.cuda() # Train the model for epoch in range(train_config["n_epochs"]): epoch_loss = 0.0 - for data in train_loader: + if self.config['verbose']: + print(f'Training epoch {epoch}...') + for batch, data in tqdm(enumerate(train_loader), total=len(train_loader)): + + # moving data to GPU + if train_config['use_cuda']: + data = [d.cuda() for d in data] + # Zero the parameter gradients optimizer.zero_grad() diff --git a/metal/contrib/featurizers/embedding_featurizer.py b/metal/contrib/featurizers/embedding_featurizer.py index f050d8af..c12b7f48 100644 --- a/metal/contrib/featurizers/embedding_featurizer.py +++ b/metal/contrib/featurizers/embedding_featurizer.py @@ -5,7 +5,7 @@ from torch.nn.utils.rnn import pad_sequence from torchtext.vocab import Vocab -from metal.contrib.featurizers import Featurizer +from metal.contrib.featurizers.featurizer import Featurizer class EmbeddingFeaturizer(Featurizer): diff --git a/metal/contrib/featurizers/requirements.txt b/metal/contrib/featurizers/requirements.txt index 9accbf14..970e724f 100644 --- a/metal/contrib/featurizers/requirements.txt +++ b/metal/contrib/featurizers/requirements.txt @@ -1,3 +1,3 @@ torchtext==0.2.3 -ntlk -scikit-learn \ No newline at end of file +nltk +scikit-learn diff --git a/metal/end_model/end_model.py 
b/metal/end_model/end_model.py index 033f22b7..d41a7b59 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -72,7 +72,7 @@ def _build(self, input_module, middle_modules, head_module): self.network = nn.Sequential(input_layer, *middle_layers, head) # Construct loss module - self.criteria = SoftCrossEntropyLoss(reduction="sum") + self.criteria = SoftCrossEntropyLoss(reduction="sum", use_cuda=self.config['train_config']['use_cuda']) def _build_input_layer(self, input_module): if input_module is None: @@ -164,7 +164,13 @@ def _make_data_loader(self, X, Y, data_loader_config): return data_loader def _get_loss_fn(self): - loss_fn = lambda X, Y: self.criteria(self.forward(X), Y) + if hasattr(self.config, 'use_cuda'): + if self.config['use_cuda']: + criteria = self.criteria.cuda() + else: + criteria = self.criteria + loss_fn = lambda X, Y: criteria(self.forward(X), Y) + return loss_fn def train(self, X_train, Y_train, X_dev=None, Y_dev=None, **kwargs): diff --git a/metal/end_model/loss.py b/metal/end_model/loss.py index d5787520..22016968 100644 --- a/metal/end_model/loss.py +++ b/metal/end_model/loss.py @@ -18,17 +18,22 @@ class SoftCrossEntropyLoss(nn.Module): target: An [n, k] float tensor of target probabilities """ - def __init__(self, weight=None, reduction="elementwise_mean"): + def __init__(self, weight=None, reduction="elementwise_mean", use_cuda=False): super().__init__() assert weight is None or isinstance(weight, torch.FloatTensor) self.weight = weight self.reduction = reduction + self.use_cuda = use_cuda def forward(self, input, target): n, k = input.shape cum_losses = torch.zeros(n) + if self.use_cuda: + cum_losses = cum_losses.cuda() for y in range(k): cls_idx = torch.full((n,), y, dtype=torch.long) + if self.use_cuda: + cls_idx = cls_idx.cuda() y_loss = F.cross_entropy(input, cls_idx, reduction="none") if self.weight is not None: y_loss = y_loss * self.weight[y] diff --git a/metal/label_model/lm_defaults.py b/metal/label_model/lm_defaults.py index e358cbf3..744671e8 100644 --- a/metal/label_model/lm_defaults.py +++ b/metal/label_model/lm_defaults.py @@ -26,5 +26,7 @@ # Train loop "n_epochs": 100, "print_every": 10, + # GPU + "use_cuda": False, }, } From df50a3d564a20fe368f4d72e8345362791b75bfa Mon Sep 17 00:00:00 2001 From: jdunnmon Date: Tue, 28 Aug 2018 16:45:18 -0700 Subject: [PATCH 02/35] re-add pre commit hook --- .isort.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.isort.cfg b/.isort.cfg index 6d1942f3..ee7505c0 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -4,4 +4,4 @@ include_trailing_comma=True force_grid_wrap=0 combine_as_imports=True line_length=80 -known_third_party=matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext \ No newline at end of file +known_third_party=matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext,tqdm \ No newline at end of file From 1f53c78b28e00fd53819723069acd72f055fb163 Mon Sep 17 00:00:00 2001 From: jdunnmon Date: Tue, 28 Aug 2018 16:46:01 -0700 Subject: [PATCH 03/35] re-add pre commit hook --- .pre-commit-config.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..68335bc4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,19 @@ +repos: +- repo: https://github.com/asottile/seed-isort-config + rev: master + hooks: + - id: seed-isort-config +- repo: 
https://github.com/pre-commit/mirrors-isort + rev: master + hooks: + - id: isort +- repo: https://github.com/ambv/black + rev: stable + hooks: + - id: black + args: [--line-length, '80'] + language_version: python3.6 +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: master + hooks: + - id: flake8 \ No newline at end of file From 7bc4ec0e3edc49b130aa6f2d2fe99dedb724c429 Mon Sep 17 00:00:00 2001 From: jdunnmon Date: Tue, 28 Aug 2018 20:39:06 -0700 Subject: [PATCH 04/35] updated score method to handle cuda input --- metal/classifier.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index cd850f24..35d2a899 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -142,29 +142,31 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): checkpointer = Checkpointer( model_class, **checkpoint_config, verbose=self.config["verbose"] ) - + # Moving model and dev data to GPU - if train_config['use_cuda']: + if train_config["use_cuda"]: if evaluate_dev: X_dev = X_dev.cuda() Y_dev = Y_dev.cuda() - - if self.config['verbose']: - print('Using GPU...') - + + if self.config["verbose"]: + print("Using GPU...") + self.cuda() # Train the model for epoch in range(train_config["n_epochs"]): epoch_loss = 0.0 - if self.config['verbose']: - print(f'Training epoch {epoch}...') - for batch, data in tqdm(enumerate(train_loader), total=len(train_loader)): + if self.config["verbose"]: + print(f"Training epoch {epoch}...") + for batch, data in tqdm( + enumerate(train_loader), total=len(train_loader) + ): # moving data to GPU - if train_config['use_cuda']: + if train_config["use_cuda"]: data = [d.cuda() for d in data] - + # Zero the parameter gradients optimizer.zero_grad() @@ -252,7 +254,9 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): if not self.multitask: print("Confusion Matrix (Dev)") - confusion_matrix(Y_p_dev, Y_dev, pretty_print=True) + confusion_matrix( + Y_p_dev, self._to_numpy(Y_dev), pretty_print=True + ) def _set_optimizer(self, optimizer_config): opt = optimizer_config["optimizer"] @@ -314,6 +318,10 @@ def score( Returns: scores: A (float) score """ + + if self.config["train_config"]["use_cuda"]: + X = X.cuda() + Y = self._to_numpy(Y) Y_p = self.predict(X, break_ties=break_ties, **kwargs) @@ -392,7 +400,7 @@ def _to_numpy(Z): elif isinstance(Z, list): return np.array(Z) elif isinstance(Z, torch.Tensor): - return Z.numpy() + return Z.cpu().numpy() else: msg = ( f"Expected None, list, numpy.ndarray or torch.Tensor, " From c3a8e59a6ca334d69dd4567ebed6b34239635720 Mon Sep 17 00:00:00 2001 From: jdunnmon Date: Tue, 28 Aug 2018 21:21:40 -0700 Subject: [PATCH 05/35] added disable_prog_bar option in classifier --- metal/classifier.py | 6 +++--- metal/end_model/em_defaults.py | 1 + metal/label_model/lm_defaults.py | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 35d2a899..29bb8e9b 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -157,10 +157,10 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): # Train the model for epoch in range(train_config["n_epochs"]): epoch_loss = 0.0 - if self.config["verbose"]: - print(f"Training epoch {epoch}...") for batch, data in tqdm( - enumerate(train_loader), total=len(train_loader) + enumerate(train_loader), + total=len(train_loader), + disable=train_config["disable_prog_bar"], ): # moving data to GPU diff --git 
a/metal/end_model/em_defaults.py b/metal/end_model/em_defaults.py index 8395c914..d4ee7f19 100644 --- a/metal/end_model/em_defaults.py +++ b/metal/end_model/em_defaults.py @@ -18,6 +18,7 @@ "train_config": { # Display "print_every": 1, # Print after this many epochs + "disable_prog_bar": False, # Disable progress bar each epoch # GPU "use_cuda": False, # Dataloader diff --git a/metal/label_model/lm_defaults.py b/metal/label_model/lm_defaults.py index 744671e8..806a6a7f 100644 --- a/metal/label_model/lm_defaults.py +++ b/metal/label_model/lm_defaults.py @@ -26,6 +26,7 @@ # Train loop "n_epochs": 100, "print_every": 10, + "disable_prog_bar": True, # Disable progress bar each epoch # GPU "use_cuda": False, }, From a68fa1722f51119fe08a3fbbae4e395ae4861d92 Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 02:56:34 -0700 Subject: [PATCH 06/35] changes to end model and classifier to allow for use of tuple or DataLoader input --- metal/classifier.py | 120 +++++++++++++++++++++++++++-------- metal/end_model/end_model.py | 44 +++++++------ 2 files changed, 118 insertions(+), 46 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 29bb8e9b..92a19a66 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -5,11 +5,12 @@ import torch import torch.nn as nn import torch.optim as optim +from torch.utils.data.dataloader import DataLoader from tqdm import tqdm from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, recursive_merge_dicts +from metal.utils import Checkpointer, recursive_merge_dicts, hard_to_soft class Classifier(nn.Module): @@ -111,21 +112,21 @@ def train(self, *args, **kwargs): """ raise NotImplementedError - def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): + def _train(self, train_loader, loss_fn, dev_loader=None): """The internal training routine called by train() after initial setup Args: train_loader: a torch DataLoader of X (data) and Y (labels) for the train split loss_fn: the loss function to minimize (maps *data -> loss) - X_dev: the dev set model input - Y_dev: the dev set target labels + dev_loader: a torch DataLoader of X (data) and Y (labels) for + the dev splot - If either of X_dev or Y_dev is not provided, then no checkpointing or + If dev_loader is not provided, then no checkpointing or evaluation on the dev set will occur. 
""" train_config = self.config["train_config"] - evaluate_dev = X_dev is not None and Y_dev is not None + evaluate_dev = dev_loader is not None # Set the optimizer optimizer_config = train_config["optimizer_config"] @@ -143,11 +144,8 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): model_class, **checkpoint_config, verbose=self.config["verbose"] ) - # Moving model and dev data to GPU + # Moving model to GPU if train_config["use_cuda"]: - if evaluate_dev: - X_dev = X_dev.cuda() - Y_dev = Y_dev.cuda() if self.config["verbose"]: print("Using GPU...") @@ -163,6 +161,9 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): disable=train_config["disable_prog_bar"], ): + # converting hard to soft labels for training + data[1] = self._preprocess_Y(data[1], self.k) + # moving data to GPU if train_config["use_cuda"]: data = [d.cuda() for d in data] @@ -216,8 +217,9 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): if evaluate_dev and (epoch % train_config["validation_freq"] == 0): val_metric = train_config["validation_metric"] dev_score = self.score( - X_dev, Y_dev, metric=val_metric, verbose=False + dev_loader, metric=val_metric, verbose=False ) + if train_config["checkpoint"]: checkpointer.checkpoint(self, epoch, dev_score) @@ -250,14 +252,15 @@ def _train(self, train_loader, loss_fn, X_dev=None, Y_dev=None): print("Finished Training") if evaluate_dev: - Y_p_dev = self.predict(X_dev) - + # Currently using default random break ties in evaluate + Y_p_dev, Y_dev = self.evaluate(dev_loader) + if not self.multitask: print("Confusion Matrix (Dev)") confusion_matrix( - Y_p_dev, self._to_numpy(Y_dev), pretty_print=True + Y_p_dev, Y_dev, pretty_print=True ) - + def _set_optimizer(self, optimizer_config): opt = optimizer_config["optimizer"] if opt == "sgd": @@ -294,10 +297,79 @@ def _set_scheduler(self, scheduler_config, optimizer): ) return lr_scheduler + def _preprocess_Y(self, Y, k): + """Convert Y to soft labels if necessary""" + Y = Y.clone() + + # If hard labels, convert to soft labels + if Y.dim() == 1 or Y.shape[1] == 1: + Y = hard_to_soft(Y.long(), k=k) + return Y + + def _batch_evaluate(self, loader, break_ties='random', **kwargs): + """Evaluates the model using minibatches + + Args: + loader: Pytorch DataLoader supplying (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in + {1,...,k}; can be None for cases with no ground truth + + Returns: + Y_p: an np.ndarray of predictions + Y: an np.ndarray of ground truth labels + """ + Y = [] + Y_p = [] + for batch, data in enumerate(loader): + X_batch, Y_batch = data + + if self.config["train_config"]["use_cuda"]: + X_batch = X_batch.cuda() + + Y.append(self._to_numpy(Y_batch)) + Y_p.append(self._to_numpy( + self.predict(X_batch, break_ties=break_ties, **kwargs))) + + Y = np.hstack(Y) + Y_p = np.hstack(Y_p) + + return Y_p, Y + + def evaluate(self, data, break_ties='random', **kwargs): + """Evaluates the model + + Args: + data: either a Pytorch DataLoader or tuple supplying (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in + {1,...,k} + + Returns: + Y_p: an np.ndarray of predictions + Y: an np.ndarray of ground truth labels + """ + + if type(data) is tuple: + X,Y = data + if self.config["train_config"]["use_cuda"]: + X = X.cuda() + + Y = self._to_numpy(Y) + Y_p = self.predict(X, break_ties=break_ties, **kwargs) + + elif type(data) is DataLoader: + Y_p, Y = self._batch_evaluate(data, 
break_ties=break_ties) + + else: + raise ValueError( + 'Unrecognized input data structure, use tuple or DataLoader!') + + return Y_p, Y + def score( self, - X, - Y, + data, metric=["accuracy"], break_ties="random", verbose=True, @@ -306,9 +378,10 @@ def score( """Scores the predictive performance of the Classifier on all tasks Args: - X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in - {1,...,k} + data: either a Pytorch DataLoader or tuple supplying (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in + {1,...,k} metric: A metric (string) with which to score performance or a list of such metrics break_ties: How to break ties when making predictions @@ -319,12 +392,7 @@ def score( scores: A (float) score """ - if self.config["train_config"]["use_cuda"]: - X = X.cuda() - - Y = self._to_numpy(Y) - Y_p = self.predict(X, break_ties=break_ties, **kwargs) - + Y_p, Y = self.evaluate(data, break_ties=break_ties) metric_list = metric if isinstance(metric, list) else [metric] scores = [] for metric in metric_list: diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index d41a7b59..b55f73d8 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -7,13 +7,12 @@ from metal.end_model.em_defaults import em_default_config from metal.end_model.loss import SoftCrossEntropyLoss from metal.modules import IdentityModule -from metal.utils import MetalDataset, hard_to_soft, recursive_merge_dicts +from metal.utils import MetalDataset, recursive_merge_dicts class EndModel(Classifier): """A dynamically constructed discriminative classifier - Args: layer_out_dims: a list of integers corresponding to the output sizes of the layers of your network. 
The first element is the dimensionality of the input layer, the last element is the @@ -149,17 +148,8 @@ def update_config(self, update_dict): """Updates self.config with the values in a given update dictionary""" self.config = recursive_merge_dicts(self.config, update_dict) - def _preprocess_Y(self, Y, k): - """Convert Y to soft labels if necessary""" - Y = Y.clone() - - # If hard labels, convert to soft labels - if Y.dim() == 1 or Y.shape[1] == 1: - Y = hard_to_soft(Y.long(), k=k) - return Y - def _make_data_loader(self, X, Y, data_loader_config): - dataset = MetalDataset(X, self._preprocess_Y(Y, self.k)) + dataset = MetalDataset(X, Y) data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) return data_loader @@ -172,17 +162,31 @@ def _get_loss_fn(self): loss_fn = lambda X, Y: criteria(self.forward(X), Y) return loss_fn + + def _convert_input_data(self, data): + if type(data) is tuple: + X,Y = data + Y = self._to_torch(Y, dtype=torch.FloatTensor) + loader_config = self.config['train_config']['data_loader_config'] + loader = self._make_data_loader(X, Y, loader_config) + elif type(data) is DataLoader: + loader = data + else: + raise ValueError( + 'Unrecognized input data structure, use tuple or DataLoader!') + return loader - def train(self, X_train, Y_train, X_dev=None, Y_dev=None, **kwargs): + def train(self, train_data, dev_data=None, **kwargs): + self.config = recursive_merge_dicts(self.config, kwargs) train_config = self.config["train_config"] - Y_train = self._to_torch(Y_train, dtype=torch.FloatTensor) - Y_dev = self._to_torch(Y_dev) - - # Make data loaders - loader_config = train_config["data_loader_config"] - train_loader = self._make_data_loader(X_train, Y_train, loader_config) + # Convert input data to data loaders + train_loader = self._convert_input_data(train_data) + if dev_data is not None: + dev_loader = self._convert_input_data(dev_data) + else: + dev_loader = None # Initialize the model self.reset() @@ -191,7 +195,7 @@ def train(self, X_train, Y_train, X_dev=None, Y_dev=None, **kwargs): loss_fn = self._get_loss_fn() # Execute training procedure - self._train(train_loader, loss_fn, X_dev=X_dev, Y_dev=Y_dev) + self._train(train_loader, loss_fn, dev_loader=dev_loader) def predict_proba(self, X): """Returns a [n, k] tensor of soft (float) predictions.""" From d44ff5ae38da693a98907d680b9b64672af1f6a6 Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 02:59:57 -0700 Subject: [PATCH 07/35] typo in docstring --- metal/classifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metal/classifier.py b/metal/classifier.py index 92a19a66..8a396773 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -120,7 +120,7 @@ def _train(self, train_loader, loss_fn, dev_loader=None): the train split loss_fn: the loss function to minimize (maps *data -> loss) dev_loader: a torch DataLoader of X (data) and Y (labels) for - the dev splot + the dev split If dev_loader is not provided, then no checkpointing or evaluation on the dev set will occur. 
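For orientation, the training and evaluation API that the patches above converge on can be exercised roughly as follows. This is a minimal usage sketch, not part of the patch series: the synthetic data, the import path, and the use of update_config to toggle the new use_cuda / disable_prog_bar options are assumptions based on the diffs above and on the updated tests later in the series (which call em.train((X, Y), dev_data=(X_dev, Y_dev)) and em.score((X_test, Y_test))).

    import torch
    from metal.end_model import EndModel  # import path assumed

    # Synthetic binary-classification data; labels live in {1, 2} (MeTaL convention)
    X_train, Y_train = torch.randn(1000, 2), torch.randint(1, 3, (1000,))
    X_dev, Y_dev = torch.randn(200, 2), torch.randint(1, 3, (200,))
    X_test, Y_test = torch.randn(200, 2), torch.randint(1, 3, (200,))

    # Small feed-forward end model: 2 inputs -> 10 hidden units -> 2 classes
    em = EndModel(layer_out_dims=[2, 10, 2], seed=1, verbose=True)

    # The train_config options added by these patches; update_config merges the
    # partial dict into the existing config via recursive_merge_dicts
    em.update_config({
        "train_config": {
            "use_cuda": torch.cuda.is_available(),
            "disable_prog_bar": False,
        }
    })

    # After patch 06, train/score accept an (X, Y) tuple or a torch DataLoader
    em.train((X_train, Y_train), dev_data=(X_dev, Y_dev), n_epochs=5)
    print(em.score((X_test, Y_test), metric="accuracy"))
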
From be8ea6a948b91a7d1204eb2ae90610b38794eea7 Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 03:12:08 -0700 Subject: [PATCH 08/35] removed precommit hook --- .pre-commit-config.yaml | 19 ------------------- metal/classifier.py | 2 +- 2 files changed, 1 insertion(+), 20 deletions(-) delete mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 68335bc4..00000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,19 +0,0 @@ -repos: -- repo: https://github.com/asottile/seed-isort-config - rev: master - hooks: - - id: seed-isort-config -- repo: https://github.com/pre-commit/mirrors-isort - rev: master - hooks: - - id: isort -- repo: https://github.com/ambv/black - rev: stable - hooks: - - id: black - args: [--line-length, '80'] - language_version: python3.6 -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: master - hooks: - - id: flake8 \ No newline at end of file diff --git a/metal/classifier.py b/metal/classifier.py index 8a396773..6c79515b 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -252,7 +252,7 @@ def _train(self, train_loader, loss_fn, dev_loader=None): print("Finished Training") if evaluate_dev: - # Currently using default random break ties in evaluate + # Currently use default random break ties in evaluate Y_p_dev, Y_dev = self.evaluate(dev_loader) if not self.multitask: From a4031e4eb01abba241ca2059f559d64fb85f1a87 Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 03:14:01 -0700 Subject: [PATCH 09/35] re-added pre-commit hook --- .pre-commit-config.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..68335bc4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,19 @@ +repos: +- repo: https://github.com/asottile/seed-isort-config + rev: master + hooks: + - id: seed-isort-config +- repo: https://github.com/pre-commit/mirrors-isort + rev: master + hooks: + - id: isort +- repo: https://github.com/ambv/black + rev: stable + hooks: + - id: black + args: [--line-length, '80'] + language_version: python3.6 +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: master + hooks: + - id: flake8 \ No newline at end of file From 362b523412d1473bec0aa3a269af25aa3e40d77a Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 03:31:40 -0700 Subject: [PATCH 10/35] style fix --- metal/classifier.py | 64 +++++++++++++++++++----------------- metal/end_model/end_model.py | 22 +++++++------ metal/end_model/loss.py | 4 ++- 3 files changed, 48 insertions(+), 42 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 6c79515b..3f16fb43 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -10,7 +10,7 @@ from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, recursive_merge_dicts, hard_to_soft +from metal.utils import Checkpointer, hard_to_soft, recursive_merge_dicts class Classifier(nn.Module): @@ -119,7 +119,7 @@ def _train(self, train_loader, loss_fn, dev_loader=None): train_loader: a torch DataLoader of X (data) and Y (labels) for the train split loss_fn: the loss function to minimize (maps *data -> loss) - dev_loader: a torch DataLoader of X (data) and Y (labels) for + dev_loader: a torch DataLoader of X (data) and Y (labels) for the dev split If dev_loader is not provided, then no checkpointing or @@ -163,7 +163,7 @@ def 
_train(self, train_loader, loss_fn, dev_loader=None): # converting hard to soft labels for training data[1] = self._preprocess_Y(data[1], self.k) - + # moving data to GPU if train_config["use_cuda"]: data = [d.cuda() for d in data] @@ -219,7 +219,7 @@ def _train(self, train_loader, loss_fn, dev_loader=None): dev_score = self.score( dev_loader, metric=val_metric, verbose=False ) - + if train_config["checkpoint"]: checkpointer.checkpoint(self, epoch, dev_score) @@ -254,13 +254,11 @@ def _train(self, train_loader, loss_fn, dev_loader=None): if evaluate_dev: # Currently use default random break ties in evaluate Y_p_dev, Y_dev = self.evaluate(dev_loader) - + if not self.multitask: print("Confusion Matrix (Dev)") - confusion_matrix( - Y_p_dev, Y_dev, pretty_print=True - ) - + confusion_matrix(Y_p_dev, Y_dev, pretty_print=True) + def _set_optimizer(self, optimizer_config): opt = optimizer_config["optimizer"] if opt == "sgd": @@ -305,15 +303,15 @@ def _preprocess_Y(self, Y, k): if Y.dim() == 1 or Y.shape[1] == 1: Y = hard_to_soft(Y.long(), k=k) return Y - - def _batch_evaluate(self, loader, break_ties='random', **kwargs): + + def _batch_evaluate(self, loader, break_ties="random", **kwargs): """Evaluates the model using minibatches Args: - loader: Pytorch DataLoader supplying (X,Y): + loader: Pytorch DataLoader supplying (X,Y): X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in - {1,...,k}; can be None for cases with no ground truth + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k}; can be None for cases with no ground truth Returns: Y_p: an np.ndarray of predictions @@ -328,45 +326,49 @@ def _batch_evaluate(self, loader, break_ties='random', **kwargs): X_batch = X_batch.cuda() Y.append(self._to_numpy(Y_batch)) - Y_p.append(self._to_numpy( - self.predict(X_batch, break_ties=break_ties, **kwargs))) - + Y_p.append( + self._to_numpy( + self.predict(X_batch, break_ties=break_ties, **kwargs) + ) + ) + Y = np.hstack(Y) Y_p = np.hstack(Y_p) return Y_p, Y - - def evaluate(self, data, break_ties='random', **kwargs): + + def evaluate(self, data, break_ties="random", **kwargs): """Evaluates the model Args: data: either a Pytorch DataLoader or tuple supplying (X,Y): X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in - {1,...,k} + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k} Returns: Y_p: an np.ndarray of predictions Y: an np.ndarray of ground truth labels - """ - + """ + if type(data) is tuple: - X,Y = data + X, Y = data if self.config["train_config"]["use_cuda"]: X = X.cuda() Y = self._to_numpy(Y) Y_p = self.predict(X, break_ties=break_ties, **kwargs) - + elif type(data) is DataLoader: Y_p, Y = self._batch_evaluate(data, break_ties=break_ties) - + else: raise ValueError( - 'Unrecognized input data structure, use tuple or DataLoader!') - + "Unrecognized input data structure, use tuple or DataLoader!" 
+ ) + return Y_p, Y - + def score( self, data, @@ -380,8 +382,8 @@ def score( Args: data: either a Pytorch DataLoader or tuple supplying (X,Y): X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in - {1,...,k} + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k} metric: A metric (string) with which to score performance or a list of such metrics break_ties: How to break ties when making predictions diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index b55f73d8..6937b322 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -71,7 +71,9 @@ def _build(self, input_module, middle_modules, head_module): self.network = nn.Sequential(input_layer, *middle_layers, head) # Construct loss module - self.criteria = SoftCrossEntropyLoss(reduction="sum", use_cuda=self.config['train_config']['use_cuda']) + self.criteria = SoftCrossEntropyLoss( + reduction="sum", use_cuda=self.config["train_config"]["use_cuda"] + ) def _build_input_layer(self, input_module): if input_module is None: @@ -154,32 +156,32 @@ def _make_data_loader(self, X, Y, data_loader_config): return data_loader def _get_loss_fn(self): - if hasattr(self.config, 'use_cuda'): - if self.config['use_cuda']: + if hasattr(self.config, "use_cuda"): + if self.config["use_cuda"]: criteria = self.criteria.cuda() else: criteria = self.criteria loss_fn = lambda X, Y: criteria(self.forward(X), Y) - + return loss_fn - + def _convert_input_data(self, data): if type(data) is tuple: - X,Y = data + X, Y = data Y = self._to_torch(Y, dtype=torch.FloatTensor) - loader_config = self.config['train_config']['data_loader_config'] + loader_config = self.config["train_config"]["data_loader_config"] loader = self._make_data_loader(X, Y, loader_config) elif type(data) is DataLoader: loader = data else: raise ValueError( - 'Unrecognized input data structure, use tuple or DataLoader!') + "Unrecognized input data structure, use tuple or DataLoader!" 
+ ) return loader def train(self, train_data, dev_data=None, **kwargs): - + self.config = recursive_merge_dicts(self.config, kwargs) - train_config = self.config["train_config"] # Convert input data to data loaders train_loader = self._convert_input_data(train_data) diff --git a/metal/end_model/loss.py b/metal/end_model/loss.py index 22016968..d9612bb4 100644 --- a/metal/end_model/loss.py +++ b/metal/end_model/loss.py @@ -18,7 +18,9 @@ class SoftCrossEntropyLoss(nn.Module): target: An [n, k] float tensor of target probabilities """ - def __init__(self, weight=None, reduction="elementwise_mean", use_cuda=False): + def __init__( + self, weight=None, reduction="elementwise_mean", use_cuda=False + ): super().__init__() assert weight is None or isinstance(weight, torch.FloatTensor) self.weight = weight From 7d61e428ab62b2eeec164ce0aaa0854781954d2a Mon Sep 17 00:00:00 2001 From: Jared Date: Sun, 2 Sep 2018 22:53:39 -0700 Subject: [PATCH 11/35] refactor to break ties --- metal/classifier.py | 26 ++++++++++++-------------- metal/end_model/end_model.py | 13 +++++++++++-- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 3f16fb43..26d7ab13 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -10,7 +10,7 @@ from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, hard_to_soft, recursive_merge_dicts +from metal.utils import Checkpointer, recursive_merge_dicts class Classifier(nn.Module): @@ -161,9 +161,6 @@ def _train(self, train_loader, loss_fn, dev_loader=None): disable=train_config["disable_prog_bar"], ): - # converting hard to soft labels for training - data[1] = self._preprocess_Y(data[1], self.k) - # moving data to GPU if train_config["use_cuda"]: data = [d.cuda() for d in data] @@ -295,15 +292,6 @@ def _set_scheduler(self, scheduler_config, optimizer): ) return lr_scheduler - def _preprocess_Y(self, Y, k): - """Convert Y to soft labels if necessary""" - Y = Y.clone() - - # If hard labels, convert to soft labels - if Y.dim() == 1 or Y.shape[1] == 1: - Y = hard_to_soft(Y.long(), k=k) - return Y - def _batch_evaluate(self, loader, break_ties="random", **kwargs): """Evaluates the model using minibatches @@ -325,7 +313,12 @@ def _batch_evaluate(self, loader, break_ties="random", **kwargs): if self.config["train_config"]["use_cuda"]: X_batch = X_batch.cuda() - Y.append(self._to_numpy(Y_batch)) + Y_batch = self._to_numpy(Y_batch) + + if Y_batch.ndim > 1: + Y_batch = self._break_ties(Y_batch, break_ties) + + Y.append(Y_batch) Y_p.append( self._to_numpy( self.predict(X_batch, break_ties=break_ties, **kwargs) @@ -353,10 +346,15 @@ def evaluate(self, data, break_ties="random", **kwargs): if type(data) is tuple: X, Y = data + if self.config["train_config"]["use_cuda"]: X = X.cuda() Y = self._to_numpy(Y) + + if Y.ndim > 1: + Y = self._break_ties(Y, break_ties) + Y_p = self.predict(X, break_ties=break_ties, **kwargs) elif type(data) is DataLoader: diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 6937b322..67738d9a 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -7,7 +7,7 @@ from metal.end_model.em_defaults import em_default_config from metal.end_model.loss import SoftCrossEntropyLoss from metal.modules import IdentityModule -from metal.utils import MetalDataset, recursive_merge_dicts +from metal.utils import MetalDataset, hard_to_soft, recursive_merge_dicts class EndModel(Classifier): @@ -150,8 +150,17 @@ def 
update_config(self, update_dict): """Updates self.config with the values in a given update dictionary""" self.config = recursive_merge_dicts(self.config, update_dict) + def _preprocess_Y(self, Y, k): + """Convert Y to soft labels if necessary""" + Y = Y.clone() + + # If hard labels, convert to soft labels + if Y.dim() == 1 or Y.shape[1] == 1: + Y = hard_to_soft(Y.long(), k=k) + return Y + def _make_data_loader(self, X, Y, data_loader_config): - dataset = MetalDataset(X, Y) + dataset = MetalDataset(X, self._preprocess_Y(Y, self.k)) data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) return data_loader From 9610ba5b608da11190404f47c0c539bc9fa522f9 Mon Sep 17 00:00:00 2001 From: Jared Date: Wed, 10 Oct 2018 12:49:52 -0700 Subject: [PATCH 12/35] updated multitask classifier --- metal/multitask/mt_classifier.py | 93 ++++++++++++++++++++++++++++---- metal/multitask/mt_end_model.py | 11 +++- 2 files changed, 92 insertions(+), 12 deletions(-) diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index b9a5654b..c0b83962 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -1,5 +1,5 @@ - import numpy as np +from torch.utils.data import DataLoader from metal.classifier import Classifier from metal.metrics import metric_score @@ -40,10 +40,84 @@ def __init__(self, K, config): self.multitask = True self.K = K + def _batch_evaluate(self, loader, break_ties="random", **kwargs): + """Evaluates the model using minibatches + + Args: + loader: Pytorch DataLoader supplying (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k}; can be None for cases with no ground truth + + Returns: + Y_p: an np.ndarray of predictions + Y: an np.ndarray of ground truth labels + """ + Y = [] + Y_p = [] + for batch, data in enumerate(loader): + X_batch, Y_batch = data + + if self.config["train_config"]["use_cuda"]: + X_batch = X_batch.cuda() + + self._check(Y_batch, typ=list) + Y_batch = [self._to_numpy(Y_t) for Y_t in Y_batch] + + Y_p = self.predict(X_batch, break_ties=break_ties, **kwargs) + self._check(Y_p, typ=list) + + Y.append(Y_batch) + Y_p.append( + self._to_numpy( + self.predict(X_batch, break_ties=break_ties, **kwargs) + ) + ) + + # TODO: Check dims here (is hstack the right thing?) + Y = np.hstack(Y) + Y_p = np.hstack(Y_p) + + return Y_p, Y + + def evaluate(self, data, break_ties="random", **kwargs): + """Evaluates the model + Args: + data: either a Pytorch DataLoader or tuple supplying (X,Y): + X: The input for the predict method + Y: A t-length list of [n] or [n, 1] np.ndarrays or + torch.Tensors of gold labels in {1,...,K_t} + + Returns: + Y_p: an np.ndarray of predictions + Y: an np.ndarray of ground truth labels + """ + + if type(data) is tuple: + X, Y = data + + if self.config["train_config"]["use_cuda"]: + X = X.cuda() + + self._check(Y, typ=list) + Y = [self._to_numpy(Y_t) for Y_t in Y] + + Y_p = self.predict(X, break_ties=break_ties, **kwargs) + self._check(Y_p, typ=list) + + elif type(data) is DataLoader: + Y_p, Y = self._batch_evaluate(data, break_ties=break_ties) + + else: + raise ValueError( + "Unrecognized input data structure, use tuple or DataLoader!" 
+ ) + + return Y_p, Y + def score( self, - X, - Y, + data, metric="accuracy", reduce="mean", break_ties="random", @@ -52,9 +126,10 @@ def score( ): """Scores the predictive performance of the Classifier on all tasks Args: - X: The input for the predict method - Y: A t-length list of [n] or [n, 1] np.ndarrays or torch.Tensors of - gold labels in {1,...,K_t} + data: either a Pytorch DataLoader or tuple supplying (X,Y): + X: The input for the predict method + Y: A t-length list of [n] or [n, 1] np.ndarrays or + torch.Tensors of gold labels in {1,...,K_t} metric: The metric with which to score performance on each task reduce: How to reduce the scores of multiple tasks: None : return a t-length list of scores @@ -64,11 +139,9 @@ def score( scores: A (float) score or a t-length list of such scores if reduce=None """ - self._check(Y, typ=list) - Y = [self._to_numpy(Y_t) for Y_t in Y] - Y_p = self.predict(X, break_ties=break_ties, **kwargs) - self._check(Y_p, typ=list) + # TODO: TESTS! + Y_p, Y = self.evaluate(data, break_ties=break_ties) task_scores = [] for t, Y_tp in enumerate(Y_p): diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index a0156b5d..7ecc1576 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -92,7 +92,9 @@ def _build(self, input_modules, middle_modules, head_modules): self.heads = self._build_task_heads(head_modules) # Construct loss module - self.criteria = SoftCrossEntropyLoss(reduction="sum") + self.criteria = SoftCrossEntropyLoss( + reduction="sum", use_cuda=self.config["train_config"]["use_cuda"] + ) def _build_input_layer(self, input_modules): if input_modules is None: @@ -293,8 +295,13 @@ def _make_data_loader(self, X, Y, data_loader_config): def _get_loss_fn(self): """Returns the loss function to use in the train routine""" + if hasattr(self.config, "use_cuda"): + if self.config["use_cuda"]: + criteria = self.criteria.cuda() + else: + criteria = self.criteria loss_fn = lambda X, Y: sum( - self.criteria(Y_tp, Y_t) for Y_tp, Y_t in zip(self.forward(X), Y) + criteria(Y_tp, Y_t) for Y_tp, Y_t in zip(self.forward(X), Y) ) return loss_fn From 4a608c7302a7fdf4eab81335985d083e2eac0f97 Mon Sep 17 00:00:00 2001 From: Jared Date: Wed, 10 Oct 2018 12:52:04 -0700 Subject: [PATCH 13/35] doc update --- metal/end_model/end_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 67738d9a..6ce58638 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -184,7 +184,7 @@ def _convert_input_data(self, data): loader = data else: raise ValueError( - "Unrecognized input data structure, use tuple or DataLoader!" + "Unrecognized input data structure, use tuple or DataLoader." ) return loader From f43d804b490779adecfe7b15e24b4b27651a9d67 Mon Sep 17 00:00:00 2001 From: Jared Date: Wed, 10 Oct 2018 12:55:07 -0700 Subject: [PATCH 14/35] typo --- metal/multitask/mt_classifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index c0b83962..8ab3f592 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -110,7 +110,7 @@ def evaluate(self, data, break_ties="random", **kwargs): else: raise ValueError( - "Unrecognized input data structure, use tuple or DataLoader!" + "Unrecognized input data structure, use tuple or DataLoader." 
) return Y_p, Y From 0375ad96252cd461c1cd0a96c653acdfc9647486 Mon Sep 17 00:00:00 2001 From: Jared Date: Wed, 10 Oct 2018 19:12:02 -0700 Subject: [PATCH 15/35] tests passing --- metal/multitask/mt_classifier.py | 43 ++++++++++++++------ tests/metal/end_model/test_end_model.py | 28 ++++++------- tests/metal/multitask/test_mt_end_model.py | 31 +++++++------- tests/metal/multitask/test_mt_label_model.py | 2 +- 4 files changed, 59 insertions(+), 45 deletions(-) diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index 8ab3f592..dc114bb2 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -46,8 +46,9 @@ def _batch_evaluate(self, loader, break_ties="random", **kwargs): Args: loader: Pytorch DataLoader supplying (X,Y): X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels - in {1,...,k}; can be None for cases with no ground truth + Y: A t-length list of [n] or [n, 1] np.ndarrays or + torch.Tensors of gold labels in {1,...,K_t} + Returns: Y_p: an np.ndarray of predictions @@ -61,20 +62,26 @@ def _batch_evaluate(self, loader, break_ties="random", **kwargs): if self.config["train_config"]["use_cuda"]: X_batch = X_batch.cuda() + Y_batch_list = [] + # Breaking ties for each task if soft labels provided + for Y_t in Y_batch: + Y_t = self._to_numpy(Y_t) + if Y_t.ndim > 1: + Y_t = self._break_ties(Y_t, break_ties) + Y_batch_list.append(Y_t) + + # Overwriting with tiebroken Y + Y_batch = Y_batch_list + self._check(Y_batch, typ=list) - Y_batch = [self._to_numpy(Y_t) for Y_t in Y_batch] - Y_p = self.predict(X_batch, break_ties=break_ties, **kwargs) - self._check(Y_p, typ=list) + Y_p_batch = self.predict(X_batch, break_ties=break_ties, **kwargs) + self._check(Y_p_batch, typ=list) + Y_p_batch = self._to_numpy(Y_p_batch) Y.append(Y_batch) - Y_p.append( - self._to_numpy( - self.predict(X_batch, break_ties=break_ties, **kwargs) - ) - ) + Y_p.append(Y_p_batch) - # TODO: Check dims here (is hstack the right thing?) 
Y = np.hstack(Y) Y_p = np.hstack(Y_p) @@ -100,7 +107,19 @@ def evaluate(self, data, break_ties="random", **kwargs): X = X.cuda() self._check(Y, typ=list) - Y = [self._to_numpy(Y_t) for Y_t in Y] + + Y_list = [] + + # Breaking ties for each task if soft labels provided + for Y_t in Y: + Y_t = self._to_numpy(Y_t) + if Y_t.ndim > 1: + Y_t = self._break_ties(Y_t, break_ties) + Y_list.append(Y_t) + + # Overwriting with tiebroken Y + Y = Y_list + self._check(Y, typ=list) Y_p = self.predict(X, break_ties=break_ties, **kwargs) self._check(Y_p, typ=list) diff --git a/tests/metal/end_model/test_end_model.py b/tests/metal/end_model/test_end_model.py index 7b844b1d..ec8ffbb0 100644 --- a/tests/metal/end_model/test_end_model.py +++ b/tests/metal/end_model/test_end_model.py @@ -33,8 +33,8 @@ def setUpClass(cls): def test_logreg(self): em = LogisticRegression(seed=1, input_dim=2, verbose=False) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_softmax(self): @@ -54,8 +54,8 @@ def test_softmax(self): + 1 ) Ys.append(Y) - em.train(Xs[0], Ys[0], Xs[1], Ys[1], lr=0.1, n_epochs=10) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), lr=0.1, n_epochs=10) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_sparselogreg(self): @@ -74,9 +74,9 @@ def test_sparselogreg(self): em = SparseLogisticRegression( seed=1, input_dim=F, padding_idx=0, verbose=False ) - em.train(X, Y, n_epochs=5, optimizer="sgd", lr=0.0005) + em.train((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005) self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0) - score = em.score(X, Y, verbose=False) + score = em.score((X, Y), verbose=False) self.assertGreater(score, 0.95) def test_singletask(self): @@ -89,8 +89,8 @@ def test_singletask(self): verbose=False, ) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_singletask_extras(self): @@ -103,8 +103,8 @@ def test_singletask_extras(self): verbose=False, ) Xs, Ys = self.single_problem - em.train(Xs[0], Ys[0], Xs[1], Ys[1], n_epochs=5) - score = em.score(Xs[2], Ys[2], verbose=False) + em.train((Xs[0], Ys[0]), dev_data=(Xs[1], Ys[1]), n_epochs=5) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) def test_custom_modules(self): @@ -122,15 +122,13 @@ def test_custom_modules(self): ) Xs, Ys = self.single_problem em.train( - Xs[0], - Ys[0], - Xs[1], - Ys[1], + (Xs[0], Ys[0]), + dev_data=(Xs[1], Ys[1]), n_epochs=5, verbose=False, show_plots=False, ) - score = em.score(Xs[2], Ys[2], verbose=False) + score = em.score((Xs[2], Ys[2]), verbose=False) self.assertGreater(score, 0.95) diff --git a/tests/metal/multitask/test_mt_end_model.py b/tests/metal/multitask/test_mt_end_model.py index 756cb576..ab8052d4 100644 --- a/tests/metal/multitask/test_mt_end_model.py +++ b/tests/metal/multitask/test_mt_end_model.py @@ -49,14 +49,12 @@ def test_multitask_top(self): top_layer = len(em.config["layer_out_dims"]) - 1 self.assertEqual(len(em.task_map[top_layer]), em.t) em.train( - self.Xs[0], - self.Ys[0], - self.Xs[1], - self.Ys[1], + 
(self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_custom_attachments(self): @@ -75,14 +73,12 @@ def test_multitask_custom_attachments(self): self.assertEqual(em.task_map[1][0], 0) self.assertEqual(em.task_map[2][0], 1) em.train( - self.Xs[0], - self.Ys[0], - self.Xs[1], - self.Ys[1], + (self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_two_modules(self): @@ -103,9 +99,12 @@ def test_multitask_two_modules(self): for i, X in enumerate(self.Xs): Xs.append([X[:, 0], X[:, 1]]) em.train( - Xs[0], self.Ys[0], Xs[1], self.Ys[1], verbose=False, n_epochs=10 + (Xs[0], self.Ys[0]), + dev_data=(Xs[1], self.Ys[1]), + verbose=False, + n_epochs=10, ) - score = em.score(Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) def test_multitask_custom_heads(self): @@ -123,14 +122,12 @@ def test_multitask_custom_heads(self): task_head_layers=[1, 2], ) em.train( - self.Xs[0], - self.Ys[0], - self.Xs[1], - self.Ys[1], + (self.Xs[0], self.Ys[0]), + dev_data=(self.Xs[1], self.Ys[1]), verbose=False, n_epochs=10, ) - score = em.score(self.Xs[2], self.Ys[2], reduce="mean", verbose=False) + score = em.score((self.Xs[2], self.Ys[2]), reduce="mean", verbose=False) self.assertGreater(score, 0.95) diff --git a/tests/metal/multitask/test_mt_label_model.py b/tests/metal/multitask/test_mt_label_model.py index e5520946..de84edc5 100644 --- a/tests/metal/multitask/test_mt_label_model.py +++ b/tests/metal/multitask/test_mt_label_model.py @@ -33,7 +33,7 @@ def _test_label_model(self, data, test_acc=True): # Test label prediction accuracy if test_acc: - acc = label_model.score(data.L, data.Y) + acc = label_model.score((data.L, data.Y)) self.assertGreater(acc, 0.95) def test_multitask(self): From 7b3d3993eefa97640d99e68c94f3bd59a9ab6a7f Mon Sep 17 00:00:00 2001 From: Jared Date: Wed, 10 Oct 2018 19:36:10 -0700 Subject: [PATCH 16/35] updated tutorials --- tutorials/Basics.ipynb | 214 +++++++++++++++++++++++++------------- tutorials/Multitask.ipynb | 163 +++++++++++++++++++---------- 2 files changed, 252 insertions(+), 125 deletions(-) diff --git a/tutorials/Basics.ipynb b/tutorials/Basics.ipynb index fe729cac..970f2a18 100644 --- a/tutorials/Basics.ipynb +++ b/tutorials/Basics.ipynb @@ -38,7 +38,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -47,18 +47,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. 
To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], + "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -108,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -127,7 +118,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -154,7 +145,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": { "scrolled": true }, @@ -275,7 +266,7 @@ "9 [1, 2] 0.784 0.784 0.735" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -295,17 +286,19 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWMAAAEICAYAAACK8ZV4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJztnX2QX2WV5z9fEkwMikkE2ZCEAXlxxPchRdzVdTPi8Dau2T/kRVcMyFRmq0AMAzskrjtQiGzc2hKZwsVJSZwElZCJWqScrDGoKWd2RSDRBYERWoikSeQtAXlbMHj2j/u0/hL65XZu9/nd3+nzqerq7qefe597+3fv9557nvOcIzMjSZIk6S4HdPsAkiRJkhTjJEmSVpBinCRJ0gJSjJMkSVpAinGSJEkLSDFOkiRpASnGybghabOkv/DedqyR9KykN3b7OJLYpBgnIyJpm6QPdPs4BpB0hSSTdNE+7UtK+xU191NL8M3sNWb24H4ebpLUIsU46VXuBxbt0/bx0j4mSJo8VvtKkpFIMU72G0kzJH1H0uOSdpef5+zT7WhJt0t6WtItkmZ2bP9uSf9H0lOS/q+kBaMY/g5gmqS3lH29BXh1aR/x+CR9Dvi3wHXFDXFdaTdJF0h6AHigo+0YSa+S9DNJnyztkyT9b0l/M8p/XZK8ghTjpAkHAF8F/gg4AngBuG6fPh8HPgEcDuwB/hZA0mzgH4GrgJnApcA3JR06ivFvLPuHykpeXff4zOy/AP8EXFjcEBd2bPcfgPnA8Z07M7OXgI8BV0p6M7AUmAR8bhTHnCSDkmKc7Ddm9qSZfdPMnjezZ6hE6d/t0+1GM/u5mT0H/FfgTEmTqERtg5ltMLPfmdkm4E7g9FEcwteAj0g6EDi7/D7a4xuM/2Zmu8zshUHO+edUD5BvUz1AzjGzl0dxzEkyKCnGyX4jaZqkv5P0K0m/AX4ETC9iO8D2jp9/BRwIHEJlrZ5RXBRPSXoKeC8wq+74ZvYw0AdcDTxgZp1j1T2+wdg+wt9XAUdSPUweqHu8STIcKcZJEy4B3gTMN7ODgfeVdnX0mdvx8xHAb4EnqATvRjOb3vF1kJktH+UxrC7Hsa+Los7xDZWycKRUhv8T+A5wiqT3ju5wk2RwUoyTuhwoaWrH12TgtVR+2KfKxNzlg2z3MUnHS5oGXAmsK6/1XwP+vaRTykTYVEkLBpkAHImbgZOBtYP8baTjexQYVfywpHOAE4BzgYuAVZJeM8pjTpJXkGKc1GUDlbANfF0BfJEqguEJ4Dbgu4NsdyPw98CvgalUAkZxKSwEPg08TmUp/2dGeU2a2Qtmdutg/t0ax3ct8OESafG3I40l6Yiyz4+b2bNm9g0qP/c1oznmJBkMZXL5JEmS7pOWcZIkSQtwF2NJp0r6haQ+SUu9x0+SJGkjrm6KElJ0P/BnQD/VaqmPmNm9bgeRJEnSQrwt4xOBPjN7sKxmWkM1iZMkSTKh8U6EMpu9A+r7qZad/h5Ji4HFAAdN0wl/fMyrXA7s7t2jWYXbjLfNeNxlnPvvmuYyDsBxb3/ebSzPz8oTr+vCE8/P6qXt/U+YWaMBT/nTg+zJXfUWVG6568WNZnZqk/E68RZjDdK2l5/EzFYAKwCmHDHXnvzEEo/j4oWzvuwyTsXckbuMAacc/k6XcQA2bvyZ21hH3/yf3Mb6ZcDrIiqTZvGrpvt4ctfL3L7xiJrjPXBI0/E68Rbjfva+4uYAO4bqPGX7cxxz8W3jflAAnOUzDPiJyS93eAqJH54CGVH4PR/Sfde8222sKlVI7+ItxncAx0o6CniEKrnLR4fqfNzbn3e1uLyI+ICJittnBW6f18YdfvfUKYe7DcU2v6HGBVcxNrM9ki4ENlKlHlxpZvd4HkMb8LoZXN0Ujje4q7Xq+Hbh9Xl5Wque/79JtVNMtRP3SgZmtoFqae2I3H/XNLcLNKKYpJuiORFf6X394EldWl1WxtNN4XnTRRXJiHhakem+mti0Wow9LWPPm+7om+NZQBEnurzHSpGc2LRajF+cexB9l3jOxsbC09o/hngTXd5ENDzSJVKfVovx22Y8zu0Bw33cJvAujndOENcK9yKqz70q+tK7tFqM7959aMjJrjynZkQMNwPfB5oXnueU0RTjiOeij6MJKCaOQuLqn3b8rLz8+xDTCk/q02ox9vQZR5yoiWqtevqnI1qrSTtptRh7EjGe1FMgc1KoORFj6pP6tFqMPd0UvmvoffC96TyX2MZ7cIJzRErSOlotxp6LPiL6BiNGiHgT0X2VtJNWi7FnNIUnEaMpkt4h4j1VkVnbxg3XFJoRCWpp+bqU4rlfooY89jqtFuOo0RReN11UN0XUZd4Rfcaen9UknzoU40arxThqnHG6D3oHzzezqH73pB6tFmPf5PLxlvNGtPYhboSD79JhH7LSR31aLcauy6GjZgJzImycccRyXK4PabehstLHeOKZKCiTzzQjqhUe0Rce9f+XuSnGkaihbV5EfcD41nCLJ1xR/3/hs7ZJWgl8EHjMzN5a2mYCNwNHUr0dnGlmuyUJuBY4HXgeONfMtpZtFgGfKbu9ysxWjTS2p2WcNCPqQzPiZKvrZ3WN31AsWec42NgjMxu+g/Q+4FlgdYcY/3dgl5ktl7QUmGFml0k6HfgklRjPB641s/lFvO8E5gEGbAFOMLPdw419sGbaf
J3U7AxrkjPZvUPUycKI7itPJs3q22Jm85rsY947ptrtG4+oOd4DjcfrZETL2Mx+JOnIfZoXAgvKz6uAzcBlpX21VQp/m6TpkmaVvpvMbBeApE3AqcBNw43tuxw63it9xHMC70T28XKWRIzaqAjuphiCw8xsJ4CZ7ZT0htI+G9je0a+/tA3V3hoiTkB5hmWFzAdNzNVqEV0vkBN4+6JB2myY9lfuQFoMLAY4Ynar5xeTDqKm6/SMP083xcRmf9XuUUmzilU8C3istPcDczv6zQF2lPYF+7RvHmzHZrYCWAEw5Yi5FnFiyMtijZgWFOKKVkT3Qc7F1Gd/xXg9sAhYXr7f0tF+oaQ1VBN4TxfB3ghcLWlG6XcysGykQbIgaVOChjAFTYCUwjWxqRPadhOVVXuIpH7gcioRXivpfOBh4IzSfQNVJEUfVWjbeQBmtkvSZ4E7Sr8rBybzkvEj5gMm7sRkVvqY2IwY2tZNMrStd4gqkEkzPK+LbUsujR3a1k2ihrZ5EVW0Uvh7h4mcQlPSXGA18K+A3wErzOzaofq3Woyj4hV5cMrFfm6KqBWbU/ib4WvktC5r2x7gEjPbKum1wBZJm8zs3sE6pxgHJl0vzYmYyD5qhsK2WcZlLcbAeoxnJN1Htb4ixbgteIlkRNcLxE34HtEyTirKKuZ3AT8Zqk+KcSEtoN7B0/0S8YEW9broAodIurPj9xVlncReSHoN8E1giZn9ZqidtVqM779rWshwn4i5KaJaq565KSKKZHCf8RMjRVNIOpBKiL9uZt8arm+rxdgzmiJiTK6rte+Ym8KzeoRrCkgnoj6kt7mNVI+SUvgG4D4z+8KI/TPOuCLi0uGIllbSW3gaObfaulbFGUt6L/BPwN1UoW0AnzazDYP1b7Vl/OLcg+i7xEckI/qMMyyrORHfmCKeE7Qva5uZ/TODJ0kblFaLcVQiClfEiS7A1U3h5Z92Ta0a22c8prRajKdsf87N5xQxH2/ESclkDHBNtORoGbcszni0tFqMPd0UEaMBwlqrjkR8yER1U/Q6rRZjT8vYE6+bwbOiQ9S6dBFzDEdNzt/rtFqMPcnCk83wtYAcxwqZOzkFso20WoyjRlNEXMiSLpHmeF2D6aZoJ60WY08iXqBRg/s932Jc3WROVngKZDtptRh7+owjXqBRV+C5unlCuin88PW59zmONfa0WoyjLof2suxcrTrXeNygwu+E6//PcRK5bYs+Rkurxfju3Ye6XTiegfARrX3PhDoRXQeeRHzARKBOQdJBS4dImgncDBxJlaPjTDPbXZJjXEtVmPR54Fwz21r2tQj4TNn1VWa2arixo4a2RfQZR3WJRIw8iPgWWBF/Bd6gpUOAc4Hvm9lySUuBpcBlwGnAseVrPnA9ML+I9+XAPMDKftab2e6xPqn9IX3GzYgq/BGJGoYYfgXeMKVDFgILSrdVwGYqMV4IrLYqHdxtkqZLmlX6bjKzXQBF0E8FbhpqbE+fsScRQ9s8SeHvHXICrz6j8hnvUzrksCLUmNlOSW8o3WYD2zs26y9tQ7XvO8ZiYDHAVPySy7u+TrlNdsUU46hEfEhP5Kxto6W2GO9bOqRyDQ/edZA2G6Z974aqbMkKqPIZ1z2+pkSNk/UiLcjmRHyTScu4PrXEeIjSIY9KmlWs4lnAY6W9H5jbsfkcYEdpX7BP++bhxo3rpvAZx3eiy4+oD86IDzRXw2PJOr+xxoE60RRDlQ5ZDywClpfvt3S0XyhpDdUE3tNFsDcCV0uaUfqdDCwbbmzPGniuq7ocw+i8iBpN4Sn8XoVWo6ZWDT+BB7wHOAe4W9LAp/hpKhFeK+l84GHgjPK3DVRhbX1UoW3nAZjZLkmfBe4o/a4cmMwbCt9FHy7DAJ43Q7y3CnDOI+JYiTqimyKpT51oiuFKh7yiQF2JorhgiH2tBFbWPTjPRR++hSfzpmtC1BVkycSm1Svw3jbjcW4PmMnKa1VXxORHENO3Cn4PmYgZCismwARet/D0GecrYu8QNRudlxUe9c1iwoS2dQPPfMaeuRW8iFrpw3MCNGJO6KhvFr1Oq8U4rJvCCc/JJ08iCiTEFMl0U9Sn1WLsOYEXcaImqpBEPS8v0k3RTlotxp5Z2zytSK+Y5ohC4k1E4c8JvHbSajGOWgPPK7QtousF4vqMvYgaZZOW8TjiaRlHXdXlRUTR8iZi5JBvodr4+Yy7hqdl7EnE5PIxVzA6h9E5WfxRQwO3uY00PrRajKMWJHV7TXRcVRhVIGMWP3W81oNG9IwHrRbjpBlRa8VFjdzwIid220mrxdgzUVDUcJ+IuE5MuuYs8SHqtZ4TeEGIaEVGnTWPWsMt6T537z6UY9b+Zc3eYzth2GoxzqxtzfDM0ey5nDzqa3ZEl4gvGU2RjBKvmy5iCB3g6p/2xOshE/WNaSIkl58QeAqX1wUacVUhpBWexKTVYuyaKMhRuCIG93vGGUcs6AqOboqALrkItFqMk2ZEnTX3tIwjpgaN+jDrdeoUJJ0K/AiYUvqvM7PLJR0FrAFmAluBc8zsJUlTgNXACcCTwFlmtq3saxlwPvAycJGZbRxu7KgFSb1efT1FK+qqLk/8VmamGLeROpbxi8D7zexZSQcC/yzpfwF/BVxjZmskfZlKZK8v33eb2TGSzgY+D5wl6XjgbOAtwOHArZKOM7OXx+G8Rk1E32DU7FyZB6MZEa/1CNQpSGrAs+XXA8uXAe8HPlraVwFXUInxwvIzwDrgOkkq7WvM7EXgIUl9wInAj8fiRJJXkgLZHM//odfbmeebha9LZAKEtkmaBGwBjgG+BPwSeMrM9pQu/cDs8vNsYDuAme2R9DTw+tLeeRV0btM51mJgMcCkGTPou9znw/ScgPLC90aIKcaeuFmsjqGB6RKpTy0xLq6Ed0qaDnwbePNg3cp3DfG3odr3HWsFsALgYM00r6e4p3BF9XlGJKrF74WnS2RCxRmb2VOSNgPvBqZLmlys4znAjtKtH5gL9EuaDLwO2NXRPkDnNoPim5sin+BJMtZkPuP61ImmOBT4bRHiVwMfoJqU+yHwYaqIikXALWWT9eX3H5e//8DMTNJ64BuSvkA1gXcscPsYn89+k6kSe4f0hScRqWMZzwJWFb/xAcBaM/uOpHuBNZKuAn4K3FD63wDcWCbodlFFUGBm90haC9wL7AEuGCmSwjM3RdxJDR88H2ZRfeFuxXcdP6t0U9SnTjTFXcC7Bml/kCoaYt/2/wecMcS+Pgd8bvSHOf74JkePN2ue+Yx7h4jnVBHcTdFNPJdDexKxeknUGzyi8EeNMw5vGU8UIvoho66Ki/qQiSiSOYFXn1aLcdTl0BEn1qJmbYtI1AdMWsbjiGdom+eij6OJ9zoa9QaPSNTrIi3jccQzmsIz65ib8AdNwh7RpQRBiw4ErCE4XrRajD1xfYI7XaCer/O5qrA5uRx6YpNiXMhX32Z4uV4ghb8pEXM0A2xzG2l8aLUYRw1ti0jMFYwx8a0A4yf8vU6rxTgqEWNyo4a2RUyh6fngdC1IOsttqHGh1WLsGdoWMQeBa3L5gDUEwfu6
[... remainder of base64-encoded "image/png" data omitted ...]",
+      "image/png": "[... base64-encoded PNG omitted; figure re-rendered with matplotlib 2.2.3 ...]",
       "text/plain": [
        "[... figure repr ...]"
" ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -320,17 +313,19 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAV0AAAEICAYAAAD8yyfzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAF4xJREFUeJzt3XuwXeV93vHvo7sQRhIIsJAg4qI6gaQIj2qwybh2SAs4nkBmQotTE3yp5T8gwalbD7hpiZ3SZDo2IZmxmZENMRlTCMa4Jh5sTLFjD22CEZdihKCWBYiDDujGzRaSzuXpH3sdcyqdo73OOXu/+6yj5zOz5uy19trv79064serd70X2SYiIsqY1esKREQcTpJ0IyIKStKNiCgoSTcioqAk3YiIgpJ0IyIKStKNrpK0UNLfSXpV0tckfUjSA6Pe/5mkU3pZx4iSknTjFyT9nqQNVSLsl/RtSb8+xWJ/FzgeOMb2JQe+aftI21va1GuVJEuaM8W6RPRckm4AIOnfATcA/5VWkjwJ+CJw0RSL/iXg/9oenGI5ETNCkm4gaTHwWeAK23fZ/rntAdt/Z/s/SJov6QZJ26rjBknzq8++R1KfpE9K2l61kD9cvfcZ4D8D/7pqPX90jNiWdFr1eqGkz0t6ruqOeEDSQuCH1e2vVOW8U9Jpkn5Q3bdT0t+W+LOKmKr8cy0A3gksAL4xzvv/ETgHWAMY+Cbwx8B/qt5/K7AYWAH8C+BOSf/D9rWSDJxm+4MAkj50iHp8DjgDeBfwInA2MAy8G3gGWDLSYpZ0G/Bd4L3APGDthL91RA+kpRsAxwA7D9EF8G+Az9rebnsH8BngslHvD1TvD9i+B/gZ8LaJVEDSLOAjwFW2X7A9ZPt/2943zkcGaHVdnGB7r+0HxrkvYlpJ0g2AXcCyQzyoOgF4btT5c9W1X3z+gIS9BzhygnVYRqu1/dOa938KEPAjSRslfWSC8SJ6Ikk3AP4B2AtcPM7722i1KkecVF3rpJ1VHU4d472DlsKz/aLtj9k+Afg48MWRvuGI6SxJN7D9Kq0HXl+QdLGkIyTNlXShpP8G3Ab8saRjJS2r7v1qh+swDNwMXC/pBEmzqwdm84EdtPp2fzGeV9IlklZWpy/TSsxDnaxTRDfkQVoAYPt6SS/RekB2K/A68DBwHfAIcBTweHX714D/0oVq/Hvgz4CHaHVP/B/gfNt7JF0H/C9Jc4ELgH8G3FCNvHiJVl/wM12oU0RHKYuYR0SUk+6FiIiCknQjIgpK0o2IKChJNyKioK6MXpin+V7Aom4UfZDFZ5RbR2W+ysTa57lF4gDo4CGwXTNfAwVjlRs9Nkdl4symUCCgf3BhkTivbHuDPS/vm9IXO/+9i7xrd73f98OP77vX9gVTiTdVXUm6C1jE2TqvG0Uf5MKvvVIkDsCqeTuLxNmy77gicQDmzyqXCE+dt71YrFPm7i4W6+hC/15cOqtMIgT47M5fKxLnpkv/fspl7No9xI/uPanWvbOX/2TZlANOUcbpRkSjGRhmuNfVqC1JNyIazZgBN2cyYpJuRDReWroREYUYM9SgmbVJuhHReMMFR+FMVZJuRDRaa3m55iTdWoNdJF0g6WlJmyVd3e1KRURMxDCudUwHbVu6kmYDX6C191Uf8JCku20/2e3KRUS0Y2CgQX26dVq67wA2295iez9wO1PfljsioiOMGap5TAd1+nRXAM+POu+jtUvr/0fSOmAdwAKO6EjlIiLaMgxNj3xaS52kO9a86LH2rFoPrAc4Skc36I8gIpqsNSOtOeok3T7gxFHnK+n8poQREZMkhgouBjRVdZLuQ8BqSScDLwCXAr/X1VpFRNTUepA2g5Ku7UFJVwL3ArOBm21v7HrNIiJqaI3TnUFJF8D2PcA9Xa5LRMSkDM+klm5ExHQ2I1u6ERHTlRFDDdp5LEk3Ihov3QsREYUYsd+ze12N2pJ0I6LRWpMjDvPuhcVnDBbbMPLbZywpEgfgmp9uLRJn8ew9ReIA7B4qs2szwN6Cuxw/tf/YYrGWFPp97R46skgcgM8cW2ZU6Lfn7O1IOXmQFhFRiC2G3JyWbnNqGhExjmFU62hH0omSvi9pk6SNkq6qrv+JpBckPVYd7xv1mWuqtcaflnR+uxhp6UZEo7UepHUslQ0Cn7T9iKS3AA9Luq967y9sf270zZJOp7U0whnACcD/lPRP7PG3J07SjYhG6+SDNNv9QH/1+nVJm2gtbzuei4Dbbe8DnpG0mdYa5P8w3gfSvRARjTdk1TqAZZI2jDrWjVempFXAWcCD1aUrJT0u6WZJS6trY603fqgknZZuRDTbBGek7bS9tt1Nko4Evg58wvZrkm4E/pRWw/pPgc8DH6HmeuOjJelGROMNd3D0gqS5tBLurbbvArD90qj3vwR8qzqd8Hrj6V6IiEZrLXgzq9bRjiQBNwGbbF8/6vryUbf9DvBE9fpu4FJJ86s1x1cDPzpUjLR0I6LRjBjo3DTgc4HLgB9Leqy69mngA5LW0MrxzwIfB7C9UdIdwJO0Rj5ccaiRC5CkGxENZ9OxyRG2H2Dsftpx1xO3fR1wXd0YSboR0XD1Jj5MF0m6EdFopnMt3RKSdCOi8bKIeUREIUZZxDwiopTWFuzNSWXNqWlExJiU9XQjIkoxnZ2R1m1JuhHReGnpRkQUYist3YiIUloP0rIbcEREIc3aI60rSXe+Blk1b2c3ij5IqR16Af7s1H9aJM6aR4uEAeCk+buKxTpz3ovFYm0ZXFws1ouFYvXtP6ZIHID7Z71RJM5rw1Mvo/UgLX26ERHFZEZaREQhmZEWEVFYpzamLCFJNyIazYaB4STdiIgiWt0LSboREcVkRlpERCFNGzLWtk0u6URJ35e0SdJGSVeVqFhERD2t7oU6x3RQp6U7CHzS9iOS3gI8LOk+2092uW4REbXMqD3SbPcD/dXr1yVtAlbQ2nI4IqKnWqMXZujaC5JWAWcBD47x3jpgHcCyE+Z2oGoREe01bXJE7U4OSUcCXwc+Yfu1A9+3vd72WttrFx+d53MRUc5wtQ17u2M6qJUdJc2llXBvtX1Xd6sUEVFf00YvtE26kgTcBGyyfX33qxQRMTHTZWRCHXVauucClwE/lvRYde3Ttu/pXrUiIuqxxeBMSrq2H4Bp0hkSETGGGdW9EBExnTWtT7c5bfKIiHEMW7WOdsabgSvpaEn3SfpJ9XNpdV2S/krSZkmPS3p7uxhJuhHRaCPjdDuRdHlzBu6vAOcAV0g6HbgauN/2auD+6hz
gQmB1dawDbmwXIEk3IhqvU+N0bffbfqR6/TowMgP3IuCW6rZbgIur1xcBf+OWfwSWSFp+qBhd6dPd57ls2XdcN4o+yOLZe4rEgXIbRj52Vpk4AMc/UW724Pf2nFYs1poF5TYs3bDnlCJx9g6X+129MLC0SJwB9025DBsG6y9ivkzShlHn622vH+vGA2bgHl8tiYDtfkkjCW4F8Pyoj/VV1/rHq0AepEVE403gQdpO22vb3XTgDNzWdIWxbx3jmg9VdpJuRDRap9deGGcG7kuSllet3OXA9up6H3DiqI+vBLYdqvz06UZE49mqdbRziBm4dwOXV68vB7456vrvV6MYzgFeHemGGE9auhHReB1czGbMGbjAnwN3SPoosBW4pHrvHuB9wGZgD/DhdgGSdCOi0ezOTY5oMwP3vDHuN3DFRGIk6UZEw4mhbMEeEVFOnf7a6SJJNyIarWlrLyTpRkSzudWv2xRJuhHReNNlK546knQjotGcB2kREWWleyEioqCMXoiIKMRO0o2IKCpDxiIiCkqfbkREIUYMZ/RCREQ5DWroJulGRMPlQVpERGENauom6UZE4x32LV1h5s8a6EbRB9k9tKhIHICT5u8qEqfkDr33/upRxWKd/0SxUDw7sKxYrLOP+GmROK8NLygSB2BJoV22580anHIZBoaHD/OkGxFRjIHDvaUbEVFSxulGRJSUpBsRUUq97dWniyTdiGi+tHQjIgoxOKMXIiJKak7Srb1KhKTZkh6V9K1uVigiYsJc85gGJrI0z1XApm5VJCJi0mZa0pW0Evgt4MvdrU5ExASNTI6oc0wDdVu6NwCfAobHu0HSOkkbJG342cv7O1K5iIg6Wlv2tD+mg7ZJV9L7ge22Hz7UfbbX215re+2RS+d1rIIREW0Nq94xDdRp6Z4L/LakZ4Hbgd+Q9NWu1ioiYgLkekfbcqSbJW2X9MSoa38i6QVJj1XH+0a9d42kzZKelnR+nbq2Tbq2r7G90vYq4FLge7Y/WKfwiIiuq/sQrV73wleAC8a4/he211THPQCSTqeVE8+oPvNFSbPbBWjOxkIREWOq+RCtxoM02z8EdtcMfBFwu+19tp8BNgPvaPehCSVd239v+/0T+UxERNfVb+kuG3ngXx3raka4UtLjVffD0uraCuD5Uff0VdcOKS3diGi+4ZoH7Bx54F8d62uUfiNwKrAG6Ac+X10fq+ncthMj04Ajotm6vIi57ZdGXkv6EjAyK7cPOHHUrSuBbe3KS0s3IhqvU6MXxixbWj7q9HeAkZENdwOXSpov6WRgNfCjduWlpRsRzdehiQ+SbgPeQ6vvtw+4FniPpDVVlGeBjwPY3ijpDuBJYBC4wvZQuxhJuhERFdsfGOPyTYe4/zrguonE6ErSna8BTp23vRtFH2Svy+2ce+a8F4vE+d6e04rEgbI79JbcefgrWx8vFuvBfW8tEmf13B1F4gDsGl5YLFYnTLbroBfS0o2IZjPTZopvHUm6EdF8aelGRJST7oWIiJKSdCMiCkrSjYgoYyoTH3ohSTcimi+jFyIiyklLNyKipCTdiIhC0qcbEVFYkm5ERDka7nUN6st6uhERBaWlGxHNl+6FiIhC8iAtIqKwJN2IiIKSdCMiyhDNGr2QpBsRzZY+3YiIwpJ0IyIKOtyT7nwNccrc3d0o+iBP7T+2SByALYOLi8RZs2BrkTgAzw4sKxar5A69Hzrp14vF+tLWB4rEGSqYWB7af1yROPuG+ztSTroXIiJKStKNiCjEGb0QEVFWWroREeWkTzcioqQk3YiIQkyjkm7W042IRhNvbsPe7mhblnSzpO2Snhh17WhJ90n6SfVzaXVdkv5K0mZJj0t6e5361kq6kpZIulPSU5I2SXpnnc9FRJTQqaQLfAW44IBrVwP3214N3F+dA1wIrK6OdcCNdQLUben+JfAd278MnAlsqvm5iIjuc82jXTH2D4EDZ3ZdBNxSvb4FuHjU9b9xyz8CSyQtbxejbdKVdBTwbuCmqlL7bb/SvvoREYV0KOmO43jb/QDVz5HpeiuA50fd11ddO6Q6Ld1TgB3AX0t6VNKXJS068CZJ6yRtkLTh5d0NGqkcEc1Ws2uh6l5YNpKnqmPdFCJr7NocWp2kOwd4O3Cj7bOAn/Nmn8abkez1ttfaXrv06Dyfi4iC6rd0d47kqepYX6P0l0a6Daqf26vrfcCJo+5bCWxrV1id7NgH9Nl+sDq/k1YSjoiYFjRc75iku4HLq9eXA98cdf33q1EM5wCvjnRDHErbcbq2X5T0vKS32X4aOA94cnJ1j4jovE7NSJN0G/AeWt0QfcC1wJ8Dd0j6KLAVuKS6/R7gfcBmYA/w4Tox6k6O+APgVknzgC11C4+I6LoOTo6w/YFx3jpvjHsNXDHRGLWSru3HgLUTLTwioogGzUjLNOCIaLSRGWlNkaQbEY2n4eZk3STdiGi2hi14k6QbEY2X7oWIiJIO96Q7R1BqUtqS2XvKBAJeLLQb8IY9pxSJA3D2ET8tFuvBfW8tFqvUDr0AHyu08/DCHxxfJA7Ax1b8oEicuRrqSDlp6UZElJSkGxFRSHYDjogoJ+N0IyJKc3OybpJuRDReWroREaVkckRERFl5kBYRUVCSbkREKSYP0iIiSsqDtIiIkpJ0IyLKyOSIiIiS7CxiHhFRVHNybpJuRDRfuhciIkoxkO6FiIiCmpNzk3QjovnSvRARUVBGL0RElJJVxmA2Yumshd0o+iC7h44sEgegb/8xReLsHZ5bJA7Aa8MLisVaPXdHsVhDBf8jLLVh5Bv//KUicQB+srHMJqJ7/dyUy2hNjmhO1k1LNyKaL6uMRUSU08mWrqRngdeBIWDQ9lpJRwN/C6wCngX+le2XJ1P+rM5UMyKiRzyBo7732l5je211fjVwv+3VwP3V+aQk6UZEw7XWXqhzTMFFwC3V61uAiydbUJJuRDSfXe+oWRrwXUkPS1pXXTvedn8rlPuB4yZb1fTpRkSzeULb9SyTtGHU+Xrb6w+451zb2yQdB9wn6alOVHNEkm5ENF/9VuzOUf204xTlbdXP7ZK+AbwDeEnSctv9kpYD2ydb1VrdC5L+SNJGSU9Iuk1SucGdERHtdOhBmqRFkt4y8hr4l8ATwN3A5dVtlwPfnGxV27Z0Ja0A/hA43fYbku4ALgW+MtmgERGdpOGODdQ9HviGJGjlx/9u+zuSHgLukPRRYCtwyWQD1O1emAMslDQAHAFsm2zAiIiOMh2bHGF7C3DmGNd3Aed1Ikbb7gXbLwCfo5Xd+4FXbX/3wPskrZO0QdKGnbsaND0kIhpNGLneMR20TbqSltIao3YycAKwSNIHD7zP9nrba22vXXZMRqJFREGdHTLWVXWy428Cz9jeYXsAuAt4V3erFRExAQ1KunX6dLcC50g6AniDVr/GhkN/JCKikA726ZbQNunaflDSncAjwCDwKHDgYOKIiJ7p4OiFrqs1esH2tcC1Xa5LRMQkTJ+ugzoyIy0ims0k6UZEFNWc3oUk3YhovukyBreOJN2IaL4k3YiIQmwYak7/QleSbv/gQj
6789e6UfRBPnPsxiJxAO6f9UaROC8MLC0SB2DJ7D3FYu0aLrNDNMBD+ye9xvSEfWzFD4rEKbVDL8C3z1hSJM6r7lAKSks3IqKgJN2IiEIMTG3/s6KSdCOi4Qw+zPt0IyKKMXmQFhFRVPp0IyIKStKNiCglC95ERJRjYKYt7RgRMa2lpRsRUUqmAUdElGNwxulGRBSUGWkREQWlTzciohA7oxciIopKSzciohTjoaFeV6K2JN2IaLYs7RgRUViDhozN6nUFIiKmwoCHXeuoQ9IFkp6WtFnS1Z2ub5JuRDSbq0XM6xxtSJoNfAG4EDgd+ICk0ztZ3XQvRETjdfBB2juAzba3AEi6HbgIeLJTAeQuDLWQtAN4boIfWwbs7Hhlem8mfq+Z+J1gZn6v6f6dfsn2sVMpQNJ3aH3POhYAe0edr7e9flRZvwtcYPvfVueXAWfbvnIqdRytKy3dyfwhStpge2036tNLM/F7zcTvBDPze83E73Qg2xd0sDiNFaKD5adPNyJilD7gxFHnK4FtnQyQpBsR8aaHgNWSTpY0D7gUuLuTAabTg7T17W9ppJn4vWbid4KZ+b1m4nfqGtuDkq4E7gVmAzfb3tjJGF15kBYREWNL90JEREFJuhERBfU86XZ7yl0vSDpR0vclbZK0UdJVva5Tp0iaLelRSd/qdV06RdISSXdKeqr6nb2z13XqBEl/VP39e0LSbZIW9LpO0eOkW2LKXY8MAp+0/SvAOcAVM+R7AVwFbOp1JTrsL4Hv2P5l4ExmwPeTtAL4Q2Ct7V+l9VDo0t7WKqD3Ld1fTLmzvR8YmXLXaLb7bT9SvX6d1n/EK3pbq6mTtBL4LeDLva5Lp0g6Cng3cBOA7f22X+ltrTpmDrBQ0hzgCDo83jQmp9dJdwXw/KjzPmZAchpN0irgLODB3takI24APgU0Zx299k4BdgB/XXWbfFnSol5XaqpsvwB8DtgK9AOv2v5ub2sV0Puk2/Upd70k6Ujg68AnbL/W6/pMhaT3A9ttP9zrunTYHODtwI22zwJ+DjT+2YKkpbT+1XgycAKwSNIHe1urgN4n3a5PuesVSXNpJdxbbd/V6/p0wLnAb0t6llY30G9I+mpvq9QRfUCf7ZF/idxJKwk33W8Cz9jeYXsAuAt4V4/rFPQ+6XZ9yl0vSBKtPsJNtq/vdX06wfY1tlfaXkXr9/Q9241vOdl+EXhe0tuqS+fRwWX8emgrcI6kI6q/j+cxAx4QzgQ9nQZcYspdj5wLXAb8WNJj1bVP276nh3WK8f0BcGv1P/4twId7XJ8ps/2gpDuBR2iNpnmUTAmeFjINOCKioF53L0REHFaSdCMiCkrSjYgoKEk3IqKgJN2IiIKSdCMiCkrSjYgo6P8BUbVg8QA+TJoAAAAASUVORK5CYII=\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAV0AAAEICAYAAAD8yyfzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAF4xJREFUeJzt3XuwXeV93vHvo7sQRhIIsJAg4qI6gaQIj2qwybh2SAs4nkBmQotTE3yp5T8gwalbD7hpiZ3SZDo2IZmxmZENMRlTCMa4Jh5sTLFjD22CEZdihKCWBYiDDujGzRaSzuXpH3sdcyqdo73OOXu/+6yj5zOz5uy19trv79064serd70X2SYiIsqY1esKREQcTpJ0IyIKStKNiCgoSTcioqAk3YiIgpJ0IyIKStKNrpK0UNLfSXpV0tckfUjSA6Pe/5mkU3pZx4iSknTjFyT9nqQNVSLsl/RtSb8+xWJ/FzgeOMb2JQe+aftI21va1GuVJEuaM8W6RPRckm4AIOnfATcA/5VWkjwJ+CJw0RSL/iXg/9oenGI5ETNCkm4gaTHwWeAK23fZ/rntAdt/Z/s/SJov6QZJ26rjBknzq8++R1KfpE9K2l61kD9cvfcZ4D8D/7pqPX90jNiWdFr1eqGkz0t6ruqOeEDSQuCH1e2vVOW8U9Jpkn5Q3bdT0t+W+LOKmKr8cy0A3gksAL4xzvv/ETgHWAMY+Cbwx8B/qt5/K7AYWAH8C+BOSf/D9rWSDJxm+4MAkj50iHp8DjgDeBfwInA2MAy8G3gGWDLSYpZ0G/Bd4L3APGDthL91RA+kpRsAxwA7D9EF8G+Az9rebnsH8BngslHvD1TvD9i+B/gZ8LaJVEDSLOAjwFW2X7A9ZPt/2943zkcGaHVdnGB7r+0HxrkvYlpJ0g2AXcCyQzyoOgF4btT5c9W1X3z+gIS9BzhygnVYRqu1/dOa938KEPAjSRslfWSC8SJ6Ikk3AP4B2AtcPM7722i1KkecVF3rpJ1VHU4d472DlsKz/aLtj9k+Afg48MWRvuGI6SxJN7D9Kq0HXl+QdLGkIyTNlXShpP8G3Ab8saRjJS2r7v1qh+swDNwMXC/pBEmzqwdm84EdtPp2fzGeV9IlklZWpy/TSsxDnaxTRDfkQVoAYPt6SS/RekB2K/A68DBwHfAIcBTweHX714D/0oVq/Hvgz4CHaHVP/B/gfNt7JF0H/C9Jc4ELgH8G3FCNvHiJVl/wM12oU0RHKYuYR0SUk+6FiIiCknQjIgpK0o2IKChJNyKioK6MXpin+V7Aom4UfZDFZ5RbR2W+ysTa57lF4gDo4CGwXTNfAwVjlRs9Nkdl4symUCCgf3BhkTivbHuDPS/vm9IXO/+9i7xrd73f98OP77vX9gVTiTdVXUm6C1jE2TqvG0Uf5MKvvVIkDsCqeTuLxNmy77gicQDmzyqXCE+dt71YrFPm7i4W6+hC/15cOqtMIgT47M5fKxLnpkv/fspl7No9xI/uPanWvbOX/2TZlANOUcbpRkSjGRhmuNfVqC1JNyIazZgBN2cyYpJuRDReWroREYUYM9SgmbVJuhHReMMFR+FMVZJuRDRaa3m55iTdWoNdJF0g6WlJmyVd3e1KRURMxDCudUwHbVu6kmYDX6C191Uf8JCku20/2e3KRUS0Y2CgQX26dVq67wA2295iez9wO1PfljsioiOMGap5TAd1+nRXAM+POu+jtUvr/0fSOmAdwAKO6EjlIiLaMgxNj3xaS52kO9a86LH2rFoPrAc4Skc36I8gIpqsNSOtOeok3T7gxFHnK+n8poQREZMkhgouBjRVdZLuQ8BqSScDLwCXAr/X1VpFRNTUepA2g5Ku7UFJVwL3ArOBm21v7HrNIiJqaI3TnUFJF8D2PcA9Xa5LRMSkDM+klm5ExHQ2I1u6ERHTlRFDDdp5LEk3Ihov3QsREYUYsd+ze12N2pJ0I6LRWpMjDvPuhcVnDBbbMPLbZywpEgfgmp9uLRJn8ew9ReIA7B4qs2szwN6Cuxw/tf/YYrGWFPp97R46skgcgM8cW2ZU6Lfn7O1IOXmQFhFRiC2G3JyWbnNqGhExjmFU62hH0omSvi9pk6SN
kq6qrv+JpBckPVYd7xv1mWuqtcaflnR+uxhp6UZEo7UepHUslQ0Cn7T9iKS3AA9Luq967y9sf270zZJOp7U0whnACcD/lPRP7PG3J07SjYhG6+SDNNv9QH/1+nVJm2gtbzuei4Dbbe8DnpG0mdYa5P8w3gfSvRARjTdk1TqAZZI2jDrWjVempFXAWcCD1aUrJT0u6WZJS6trY603fqgknZZuRDTbBGek7bS9tt1Nko4Evg58wvZrkm4E/pRWw/pPgc8DH6HmeuOjJelGROMNd3D0gqS5tBLurbbvArD90qj3vwR8qzqd8Hrj6V6IiEZrLXgzq9bRjiQBNwGbbF8/6vryUbf9DvBE9fpu4FJJ86s1x1cDPzpUjLR0I6LRjBjo3DTgc4HLgB9Leqy69mngA5LW0MrxzwIfB7C9UdIdwJO0Rj5ccaiRC5CkGxENZ9OxyRG2H2Dsftpx1xO3fR1wXd0YSboR0XD1Jj5MF0m6EdFopnMt3RKSdCOi8bKIeUREIUZZxDwiopTWFuzNSWXNqWlExJiU9XQjIkoxnZ2R1m1JuhHReGnpRkQUYist3YiIUloP0rIbcEREIc3aI60rSXe+Blk1b2c3ij5IqR16Af7s1H9aJM6aR4uEAeCk+buKxTpz3ovFYm0ZXFws1ouFYvXtP6ZIHID7Z71RJM5rw1Mvo/UgLX26ERHFZEZaREQhmZEWEVFYpzamLCFJNyIazYaB4STdiIgiWt0LSboREcVkRlpERCFNGzLWtk0u6URJ35e0SdJGSVeVqFhERD2t7oU6x3RQp6U7CHzS9iOS3gI8LOk+2092uW4REbXMqD3SbPcD/dXr1yVtAlbQ2nI4IqKnWqMXZujaC5JWAWcBD47x3jpgHcCyE+Z2oGoREe01bXJE7U4OSUcCXwc+Yfu1A9+3vd72WttrFx+d53MRUc5wtQ17u2M6qJUdJc2llXBvtX1Xd6sUEVFf00YvtE26kgTcBGyyfX33qxQRMTHTZWRCHXVauucClwE/lvRYde3Ttu/pXrUiIuqxxeBMSrq2H4Bp0hkSETGGGdW9EBExnTWtT7c5bfKIiHEMW7WOdsabgSvpaEn3SfpJ9XNpdV2S/krSZkmPS3p7uxhJuhHRaCPjdDuRdHlzBu6vAOcAV0g6HbgauN/2auD+6hzgQmB1dawDbmwXIEk3IhqvU+N0bffbfqR6/TowMgP3IuCW6rZbgIur1xcBf+OWfwSWSFp+qBhd6dPd57ls2XdcN4o+yOLZe4rEgXIbRj52Vpk4AMc/UW724Pf2nFYs1poF5TYs3bDnlCJx9g6X+129MLC0SJwB9025DBsG6y9ivkzShlHn622vH+vGA2bgHl8tiYDtfkkjCW4F8Pyoj/VV1/rHq0AepEVE403gQdpO22vb3XTgDNzWdIWxbx3jmg9VdpJuRDRap9deGGcG7kuSllet3OXA9up6H3DiqI+vBLYdqvz06UZE49mqdbRziBm4dwOXV68vB7456vrvV6MYzgFeHemGGE9auhHReB1czGbMGbjAnwN3SPoosBW4pHrvHuB9wGZgD/DhdgGSdCOi0ezOTY5oMwP3vDHuN3DFRGIk6UZEw4mhbMEeEVFOnf7a6SJJNyIarWlrLyTpRkSzudWv2xRJuhHReNNlK546knQjotGcB2kREWWleyEioqCMXoiIKMRO0o2IKCpDxiIiCkqfbkREIUYMZ/RCREQ5DWroJulGRMPlQVpERGENauom6UZE4x32LV1h5s8a6EbRB9k9tKhIHICT5u8qEqfkDr33/upRxWKd/0SxUDw7sKxYrLOP+GmROK8NLygSB2BJoV22580anHIZBoaHD/OkGxFRjIHDvaUbEVFSxulGRJSUpBsRUUq97dWniyTdiGi+tHQjIgoxOKMXIiJKak7Srb1KhKTZkh6V9K1uVigiYsJc85gGJrI0z1XApm5VJCJi0mZa0pW0Evgt4MvdrU5ExASNTI6oc0wDdVu6NwCfAobHu0HSOkkbJG342cv7O1K5iIg6Wlv2tD+mg7ZJV9L7ge22Hz7UfbbX215re+2RS+d1rIIREW0Nq94xDdRp6Z4L/LakZ4Hbgd+Q9NWu1ioiYgLkekfbcqSbJW2X9MSoa38i6QVJj1XH+0a9d42kzZKelnR+nbq2Tbq2r7G90vYq4FLge7Y/WKfwiIiuq/sQrV73wleAC8a4/he211THPQCSTqeVE8+oPvNFSbPbBWjOxkIREWOq+RCtxoM02z8EdtcMfBFwu+19tp8BNgPvaPehCSVd239v+/0T+UxERNfVb+kuG3ngXx3raka4UtLjVffD0uraCuD5Uff0VdcOKS3diGi+4ZoH7Bx54F8d62uUfiNwKrAG6Ac+X10fq+ncthMj04Ajotm6vIi57ZdGXkv6EjAyK7cPOHHUrSuBbe3KS0s3IhqvU6MXxixbWj7q9HeAkZENdwOXSpov6WRgNfCjduWlpRsRzdehiQ+SbgPeQ6vvtw+4FniPpDVVlGeBjwPY3ijpDuBJYBC4wvZQuxhJuhERFdsfGOPyTYe4/zrguonE6ErSna8BTp23vRtFH2Svy+2ce+a8F4vE+d6e04rEgbI79JbcefgrWx8vFuvBfW8tEmf13B1F4gDsGl5YLFYnTLbroBfS0o2IZjPTZopvHUm6EdF8aelGRJST7oWIiJKSdCMiCkrSjYgoYyoTH3ohSTcimi+jFyIiyklLNyKipCTdiIhC0qcbEVFYkm5ERDka7nUN6st6uhERBaWlGxHNl+6FiIhC8iAtIqKwJN2IiIKSdCMiyhDNGr2QpBsRzZY+3YiIwpJ0IyIKOtyT7nwNccrc3d0o+iBP7T+2SByALYOLi8RZs2BrkTgAzw4sKxar5A69Hzrp14vF+tLWB4rEGSqYWB7af1yROPuG+ztSTroXIiJKStKNiCjEGb0QEVFWWroREeWkTzcioqQk3YiIQkyjkm7W042IRhNvbsPe7mhblnSzpO2Snhh17WhJ90n6SfVzaXVdkv5K0mZJj0t6e5361kq6kpZIulPSU5I2SXpnnc9FRJTQqaQLfAW44IBrVwP3214N3F+dA1wIrK6OdcCNdQLUben+JfAd278MnAlsqvm5iIjuc82jXTH2D4EDZ3ZdBNxSvb4FuHjU9b9xyz8CSyQtbxejbdKVdBTwbuCmqlL7bb/SvvoREYV0KOmO43jb/QDVz5HpeiuA50fd11ddO6Q6Ld1TgB3AX0t6VNKXJS068CZJ6yRtkLTh5d0NGqkcEc1Ws2uh6l5YNpKnqmPdFCJr7NocWp2kOwd4O3Cj7bOAn/Nmn8abkez1ttfaXrv06Dyfi4iC6rd0d47kqepYX6P0l0a6Daqf26vrfcCJo+5bCWxrV1id7NgH9Nl+sDq/k1YSjoiYFjRc75iku4HLq9eXA98cdf33q1EM5wCvjnRDHErbcbq2X5T0vKS32X4aOA94cnJ1j4jovE7NSJN0G/AeWt0QfcC1wJ8Dd0j6KLAVuKS6/R7gfcBmYA/w4Tox6k6O+APgVknzgC11C4+I6LoOTo6w/YFx3jp
vjHsNXDHRGLWSru3HgLUTLTwioogGzUjLNOCIaLSRGWlNkaQbEY2n4eZk3STdiGi2hi14k6QbEY2X7oWIiJIO96Q7R1BqUtqS2XvKBAJeLLQb8IY9pxSJA3D2ET8tFuvBfW8tFqvUDr0AHyu08/DCHxxfJA7Ax1b8oEicuRrqSDlp6UZElJSkGxFRSHYDjogoJ+N0IyJKc3OybpJuRDReWroREaVkckRERFl5kBYRUVCSbkREKSYP0iIiSsqDtIiIkpJ0IyLKyOSIiIiS7CxiHhFRVHNybpJuRDRfuhciIkoxkO6FiIiCmpNzk3QjovnSvRARUVBGL0RElJJVxmA2Yumshd0o+iC7h44sEgegb/8xReLsHZ5bJA7Aa8MLisVaPXdHsVhDBf8jLLVh5Bv//KUicQB+srHMJqJ7/dyUy2hNjmhO1k1LNyKaL6uMRUSU08mWrqRngdeBIWDQ9lpJRwN/C6wCngX+le2XJ1P+rM5UMyKiRzyBo7732l5je211fjVwv+3VwP3V+aQk6UZEw7XWXqhzTMFFwC3V61uAiydbUJJuRDSfXe+oWRrwXUkPS1pXXTvedn8rlPuB4yZb1fTpRkSzeULb9SyTtGHU+Xrb6w+451zb2yQdB9wn6alOVHNEkm5ENF/9VuzOUf204xTlbdXP7ZK+AbwDeEnSctv9kpYD2ydb1VrdC5L+SNJGSU9Iuk1SucGdERHtdOhBmqRFkt4y8hr4l8ATwN3A5dVtlwPfnGxV27Z0Ja0A/hA43fYbku4ALgW+MtmgERGdpOGODdQ9HviGJGjlx/9u+zuSHgLukPRRYCtwyWQD1O1emAMslDQAHAFsm2zAiIiOMh2bHGF7C3DmGNd3Aed1Ikbb7gXbLwCfo5Xd+4FXbX/3wPskrZO0QdKGnbsaND0kIhpNGLneMR20TbqSltIao3YycAKwSNIHD7zP9nrba22vXXZMRqJFREGdHTLWVXWy428Cz9jeYXsAuAt4V3erFRExAQ1KunX6dLcC50g6AniDVr/GhkN/JCKikA726ZbQNunaflDSncAjwCDwKHDgYOKIiJ7p4OiFrqs1esH2tcC1Xa5LRMQkTJ+ugzoyIy0ims0k6UZEFNWc3oUk3YhovukyBreOJN2IaL4k3YiIQmwYak7/QleSbv/gQj6789e6UfRBPnPsxiJxAO6f9UaROC8MLC0SB2DJ7D3FYu0aLrNDNMBD+ye9xvSEfWzFD4rEKbVDL8C3z1hSJM6r7lAKSks3IqKgJN2IiEIMTG3/s6KSdCOi4Qw+zPt0IyKKMXmQFhFRVPp0IyIKStKNiCglC95ERJRjYKYt7RgRMa2lpRsRUUqmAUdElGNwxulGRBSUGWkREQWlTzciohA7oxciIopKSzciohTjoaFeV6K2JN2IaLYs7RgRUViDhozN6nUFIiKmwoCHXeuoQ9IFkp6WtFnS1Z2ub5JuRDSbq0XM6xxtSJoNfAG4EDgd+ICk0ztZ3XQvRETjdfBB2juAzba3AEi6HbgIeLJTAeQuDLWQtAN4boIfWwbs7Hhlem8mfq+Z+J1gZn6v6f6dfsn2sVMpQNJ3aH3POhYAe0edr7e9flRZvwtcYPvfVueXAWfbvnIqdRytKy3dyfwhStpge2036tNLM/F7zcTvBDPze83E73Qg2xd0sDiNFaKD5adPNyJilD7gxFHnK4FtnQyQpBsR8aaHgNWSTpY0D7gUuLuTAabTg7T17W9ppJn4vWbid4KZ+b1m4nfqGtuDkq4E7gVmAzfb3tjJGF15kBYREWNL90JEREFJuhERBfU86XZ7yl0vSDpR0vclbZK0UdJVva5Tp0iaLelRSd/qdV06RdISSXdKeqr6nb2z13XqBEl/VP39e0LSbZIW9LpO0eOkW2LKXY8MAp+0/SvAOcAVM+R7AVwFbOp1JTrsL4Hv2P5l4ExmwPeTtAL4Q2Ct7V+l9VDo0t7WKqD3Ld1fTLmzvR8YmXLXaLb7bT9SvX6d1n/EK3pbq6mTtBL4LeDLva5Lp0g6Cng3cBOA7f22X+ltrTpmDrBQ0hzgCDo83jQmp9dJdwXw/KjzPmZAchpN0irgLODB3takI24APgU0Zx299k4BdgB/XXWbfFnSol5XaqpsvwB8DtgK9AOv2v5ub2sV0Puk2/Upd70k6Ujg68AnbL/W6/pMhaT3A9ttP9zrunTYHODtwI22zwJ+DjT+2YKkpbT+1XgycAKwSNIHe1urgN4n3a5PuesVSXNpJdxbbd/V6/p0wLnAb0t6llY30G9I+mpvq9QRfUCf7ZF/idxJKwk33W8Cz9jeYXsAuAt4V4/rFPQ+6XZ9yl0vSBKtPsJNtq/vdX06wfY1tlfaXkXr9/Q9241vOdl+EXhe0tuqS+fRwWX8emgrcI6kI6q/j+cxAx4QzgQ9nQZcYspdj5wLXAb8WNJj1bVP276nh3WK8f0BcGv1P/4twId7XJ8ps/2gpDuBR2iNpnmUTAmeFjINOCKioF53L0REHFaSdCMiCkrSjYgoKEk3IqKgJN2IiIKSdCMiCkrSjYgo6P8BUbVg8QA+TJoAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -392,7 +387,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -413,7 +408,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 17, "metadata": { "scrolled": false }, @@ -424,20 +419,36 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 6.273\n", - "[E:25]\tTrain Loss: 0.271\n", - "[E:50]\tTrain Loss: 0.020\n", - "[E:75]\tTrain Loss: 0.005\n", - "[E:99]\tTrain Loss: 0.002\n", + "[E:0]\tTrain Loss: 8.279\n", + "[E:25]\tTrain Loss: 0.706\n", + "[E:50]\tTrain Loss: 0.029\n", + "[E:75]\tTrain Loss: 0.008\n", + "[E:100]\tTrain Loss: 0.005\n", + "[E:125]\tTrain Loss: 0.003\n", + "[E:150]\tTrain Loss: 0.003\n", + "[E:175]\tTrain Loss: 0.003\n", + "[E:200]\tTrain Loss: 0.002\n", + "[E:225]\tTrain Loss: 0.002\n", + "[E:250]\tTrain Loss: 0.002\n", + "[E:275]\tTrain Loss: 0.002\n", + "[E:300]\tTrain Loss: 0.002\n", + "[E:325]\tTrain Loss: 0.002\n", + "[E:350]\tTrain Loss: 0.002\n", + "[E:375]\tTrain Loss: 0.002\n", + "[E:400]\tTrain Loss: 0.002\n", + "[E:425]\tTrain Loss: 0.002\n", + "[E:450]\tTrain Loss: 0.002\n", + "[E:475]\tTrain Loss: 0.002\n", + "[E:499]\tTrain Loss: 0.002\n", "Finished Training\n", - "CPU times: user 189 ms, sys: 14.6 ms, total: 204 ms\n", - "Wall time: 57 ms\n" + "CPU times: user 1.78 s, sys: 131 ms, total: 1.92 s\n", + "Wall time: 243 ms\n" ] } ], "source": [ "%%time\n", - "label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=100, print_every=25)" + "label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=500, print_every=25)" ] }, { @@ -449,19 +460,19 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.889\n" + "Accuracy: 0.888\n" ] } ], "source": [ - "score = label_model.score(Ls[1], Ys[1])" + "score = label_model.score((Ls[1], Ys[1]))" ] }, { @@ -473,21 +484,21 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Precision: 0.833\n", - "Recall: 0.687\n", - "F1: 0.753\n" + "Precision: 0.838\n", + "Recall: 0.675\n", + "F1: 0.748\n" ] } ], "source": [ - "scores = label_model.score(Ls[1], Ys[1], metric=['precision', 'recall', 'f1'])" + "scores = label_model.score((Ls[1], Ys[1]), metric=['precision', 'recall', 'f1'])" ] }, { @@ -499,7 +510,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -516,7 +527,7 @@ "from metal.label_model.baselines import MajorityLabelVoter\n", "\n", "mv = MajorityLabelVoter(seed=123)\n", - "scores = mv.score(Ls[1], Ys[1], metric=['precision', 'recall', 'f1'])" + "scores = mv.score((Ls[1], Ys[1]), metric=['precision', 'recall', 'f1'])" ] }, { @@ -545,22 +556,22 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 22, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "array([[0.2649789 , 0.7350211 ],\n", - " [0.00867136, 0.99132864],\n", - " [0.01536846, 0.98463154],\n", + "array([[0.2409358 , 0.7590642 ],\n", + " [0.0087304 , 0.9912696 ],\n", + " [0.01293511, 0.98706489],\n", " ...,\n", - " [0.60597746, 0.39402254],\n", - " [0.9802152 , 0.0197848 ],\n", - " [0.278771 , 0.721229 ]])" + " [0.5918672 , 0.4081328 ],\n", + " [0.98033657, 0.01966343],\n", + " [0.2443802 , 0.7556198 ]])" ] }, - "execution_count": 
13, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -594,19 +605,21 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 23, "metadata": { "scrolled": true }, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAH5NJREFUeJzt3XuYVXXd9/H3Rw7iKREcDUEOGiIeEUeDxENiGZ7QnkhNk4w7KjUr7yz0ee5bvG8teywtuwsjMSHzHD7gMUzksjQPnDQVEVSUEZWJACFExb7PH/s3umdYzOxhZs3eOJ/Xde1rr/Vbv7XWd3ON++NvrbXXUkRgZmbW0FblLsDMzCqTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSBsiyZplqR/a+t10/qHS1q4uetnbO8+SaPT9Fck/aUVt32GpBmttT1rHxwQVhEkLZF0TLnrqCNpvKT3JK1Jrxck/Y+kHnV9IuLPETGgxG3d2FS/iBgREZNbofa+kkJSx6Jt/z4iPtvSbVv74oAw27RbI2IHoBtwCvBxYE5xSLQGFfi/Ras4/qO0iiZpJ0l3S6qVtDJN92rQbU9JT0haLWmapG5F6w+R9KikVZKeknRUc2uIiPci4lngVKAW+Pe07aMk1RTt6weSXksjjoWShkv6HHAxcKqktZKeSn1nSbpc0iPAOmCPjENekvSL9LmelzS8aEG9EVeDUcrD6X1V2ufQhoesJH1K0pNp209K+lTRslmS/lvSI+mzzJC0c3P/3WzL54CwSrcV8FugD9AbeBv4nwZ9zgK+CuwGbACuAZDUE7gHuIzCKOB7wB8kVW1OIRHxPjANOLzhMkkDgPOAQ9Ko41hgSUTcD/yQwmhk+4g4sGi1LwNjgR2AVzJ2+UngJWBn4BJganH4NeKI9N417fOvDWrtRuHf5RqgO3AVcI+k7kXdvgScDewCdKbwb2ftjAPCKlpErIiIP0TEuohYA1wOHNmg2+8i4pmI+CfwH8AXJXUAzgTujYh7I+JfEfEAMBs4rgUlLaMQNg29D2wN7COpU0QsiYgXm9jWDRHxbERsiIj3MpYvB36WRjC3AguB41tQe53jgUUR8bu075uB54ETi/r8NiJeiIi3gduAQa2wX9vCOCCsoknaVtKvJb0i6S0Kh0+6pgCos7Ro+hWgE4X/6+4DjEqHl1ZJWgUMA1pyDqEn8I+GjRGxGPgOMB5YLukWSbs1sa2lTSx/LerfTfMVCqOkltqNjUcsr1D4bHXeKJpeB2zfCvu1LYwDwirdvwMDgE9GxMf48PCJivrsXjTdG3gP+DuFL+DfRUTXotd2EXHF5hSSTiSfCPw5a3lE3BQRwygEUwA/rlu0iU02dSvlnpKKP2dvCiMYgH8C2xYt+3gztrss1VisN/BaE+tZO+OAsErSSVKXoldHCsfn36ZwwrUbhWPxDZ0paR9J2wL/BdyRzhfcCJwo6VhJHdI2j8o4yd0oSZ0kDQRupvBFfFVGnwGSjpa0NbA+1fx+Wvwm0HczrlTaBTg/7X8UMBC4Ny2bD5yWllUDXyharxb4F7DHJrZ7L7CXpC9J6ijpVGAf4O5m1mcfcQ4IqyT3UvhirXuNB34GbENhRPAYcH/Ger8DbqBwWKQLcD5ARCwFRlK4iqiWwojiQkr/uz9V0lpgFTAdWAEcHBHLMvpuDVyR6nyDwpf7xWnZ7el9haS5Je4b4HGgf9rm5cAXImJFWvYfwJ7ASuBS4Ka6lSJiXer/SDq0NqR4o2kbJ1AYna0Avg+cEBF/b0Zt1g7IDwwyM7MsHkGYmVkmB4SZmWVyQJiZWSYHhJmZZerYdJfKtfPOO0ffvn3LXYaZ2RZlzpw5f4+IJm85s0UHRN++fZk9e3a5yzAz26JIyrr310Z8iMnMzDI5IMzMLJMDwszMMm3R5yDM7KPpvffeo6amhvXr15e7lC1aly5d6NWrF506ddqs9R0QZlZxampq2GGHHejbty/1b2hrpYoIVqxYQU1NDf369dusbeR6iEnSdyU9K+kZSTenu2n2k/S4pEWSbpXUOfXdOs0vTsv75lmbmVWu9evX0717d4dDC0iie/fuLRqF5RYQ6XGP5wPVEbEf0AE4jcI98q+OiP4U7kQ5Jq0yBlgZEZ8ArubDe+mbWTvkcGi5lv4b5n2SuiOwTbqv/7bA68DRwB1p+WTg5DQ9Ms2Tlg+X/0LMzMomt3MQEfGapJ8Ar1K4t/8MYA6wKiI2pG41fPiYw56kRzBGxAZJqyk8UL3ePeoljaXwoHd69+6dV/lmVkH6jrunVbe35IrGH+3doUMH9t9/fzZs2MDAgQOZPHky2267baPrbMqsWbP4yU9+wt1338306dN57rnnGDduXGbfVatWcdNNN3HOOec0ax/jx49n++2353vf+95m1bgpuQWEpJ0ojAr6UXjgyu3AiIyudQ+kyBotbPSwioiYCEwEqK6u9sMs7COrtb8UtyS/OakH79WsKtv+t9lmG+bPnw/AGWecwbXXXssFF1zwwfKIICLYaqvmHYQ56aSTOOmkkza5fNWqVfzqV79qdkDkJc9DTMcAL0dEbUS8B0wFPkXhgfN1wdSLD5+xW0N6tnBaviMZD4c3M2tLhx9+OIsXL2bJkiUMHDiQc845h8GDB7N06VJmzJjB0KFDGTx4MKNGjWLt2rUA3H///ey9994MGzaMqVOnfrCtG264gfPOOw+AN998k1NOOYUDDzyQAw88kEcffZRx48bx4osvMmjQIC688EIArrzySg455BAOOOAALrnkwyfuXn755QwYMIBjjjmGhQsX5vLZ8wyIV4EhkrZN5xKGA88BD/Hh83NHA9PS9PQ0T1o+M/y4OzMrow0bNnDfffex//77A7Bw4ULOOuss5s2bx3bbbcdll13Gn/70J+bOnUt1dTVXXXUV69ev52tf+xp33XUXf/7zn3njjTcyt33++edz5JFH8tRTTzF37lz23XdfrrjiCvbcc0/mz5/PlVdeyYwZM1i0aBFPPPEE8+fPZ86cOTz88MPMmTOHW265hXnz5jF16lSefPLJXD5/nucgHpd0BzAX2ADMo3Bo6B7gFkmXpbZJaZVJwO8kLaYwcjgtr9rMzBrz9ttvM2jQIKAwghgzZgzLli2jT58+DBlSeMT3Y489xnPPPcdhhx0GwLvvvsvQoUN5/vnn6devH/379wfgzDPPZOLEiRvtY+bMmUyZMgUonPPYcccdWblyZb0+M2bMYMaMGRx00EEArF27lkWLFrFmzRpOOeWUD86LNHbYqiVy/aF
cRFwCXNKg+SXg0Iy+64FRedZjZlaK4nMQxbbbbrsPpiOCz3zmM9x88831+syfP7/VLtGNCC666CK+/vWv12v/2c9+1iaXAfteTGZmm2HIkCE88sgjLF68GIB169bxwgsvsPfee/Pyyy/z4osvAmwUIHWGDx/OhAkTAHj//fd566232GGHHVizZs0HfY499liuv/76D85tvPbaayxfvpwjjjiCO++8k7fffps1a9Zw11135fIZfasNM6t40887jAN6dS13GfVUVVVxww03cPrpp/POO+8AcNlll7HXXnsxceJEjj/+eHbeeWeGDRvGM888s9H6P//5zxk7diyTJk2iQ4cOTJgwgaFDh3LYYYex3377MWLECK688koWLFjA0KFDAdh+++258cYbGTx4MKeeeiqDBg2iT58+HH744bl8Rm3J54Grq6vDDwyyj6r2fpnrrr33qNdWaQGxpViwYAEDBw6s1yZpTkRUN7WuDzGZmVkmB4SZmWVyQJiZWSafpDazLcOyeeWuoLLsdlDuu/AIwszMMjkgzMwskw8xmVnFO+C6Pq27wbGzmuzyZu0Kvjv+pzw292/stOMOdO7Uie+fM5pTRhyd2X/Wo7P5ybVTuHvKNRst6/vJ45l9343s3G2nFhbethwQZmYNRAQnf/UCRo86kZt++UMAXqlZxvQZD5e5srblgDAza2DmX56gc+dOfOOsL3zQ1qfXbnzrq6exfv07fPOiHzL76QV07NCBqy65gE8fdki99Vf8YxWnn3sxtStWcuigfdlSf5DscxBmZg08+8JLDN5v78xlv7zhNgD+9uBt3PyrHzL6O5ewfv079fpcevVEhh06iHkzbuakzx7Jq69l3/K70nkEYWbWhHMv/hF/eWI+nTt3olePXfnW2acCsPcn+tGn18d54aVX6vV/+LG5TL3uJwAcf8zh7NT1Y21ec2vwCMLMrIF999qDuc88/8H8L394EQ/edi21K1aWfLioLW7HnTcHhJlZA0cPO5T177zLhMm3f9C27u31ABzxycH8/s77AHjhxVd49bU3GLBn33rrHzFkML+fWuhz38xHWLnqrbYpvJX5EJOZVbyn/+0VDtjq5TbbnyT+36Sf8t3xP+X/TphMVfed2G6bbfjxxecz8tij+Ma4H7L/8C/SsUMHbrj6UrbeunO99S/57lhOP/diBh/7JY4cMpjePT/eZrW3ptxu9y1pAHBrUdMewH8CU1J7X2AJ8MWIWJmeW/1z4DhgHfCViJjb2D58u2/7KPPtvhvc7rsNA2KLUOKtNirydt8RsTAiBkXEIOBgCl/6dwLjgAcjoj/wYJoHGAH0T6+xwIS8ajMzs6a11TmI4cCLEfEKMBKYnNonAyen6ZHAlCh4DOgqqUcb1WdmZg20VUCcBtQ9mHXXiHgdIL3vktp7AkuL1qlJbfVIGitptqTZtbW1OZZsZuUSxBb747JK0tJ/w9wDQlJn4CTg9qa6ZrRt9OkiYmJEVEdEdVVVVWuUaGYV5pVV77Fh3VsOiRaICFasWEGXLl02exttcRXTCGBuRLyZ5t+U1CMiXk+HkJan9hpg96L1egHL2qA+M6swv3h8Jd8C+nT9O0r/77hAPmJQz+oFTXbp0qULvXr12uxdtEVAnM6Hh5cApgOjgSvS+7Si9vMk3QJ8ElhddyjKzNqXt975F5c/vKJe25IuXypTNRVq/Orcd5FrQEjaFvgM8PWi5iuA2ySNAV4FRqX2eylc4rqYwhVPZ+dZm5mZNS7XgIiIdUD3Bm0rKFzV1LBvAOfmWY+ZmZXOt9owM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTLkGhKSuku6Q9LykBZKGSuom6QFJi9L7TqmvJF0jabGkpyUNzrM2MzNrXN4jiJ8D90fE3sCBwAJgHPBgRPQHHkzzACOA/uk1FpiQc21mZtaI3AJC0seAI4BJABHxbkSsAkYCk1O3ycDJaXokMCUKHgO6SuqRV31mZta4PEcQewC1wG8lzZN0naTtgF0j4nWA9L5L6t8TWFq0fk1qq0fSWEmzJc2ura3NsXwzs/Ytz4DoCAwGJkTEQcA/+fBwUhZltMVGDRETI6I6Iqqrqqpap1IzM9tIngFRA9RExONp/g4KgfFm3aGj9L68qP/uRev3ApblWJ+ZmTUit4CIiDeApZIGpKbhwHPAdGB0ahsNTEvT04Gz0tVMQ4DVdYeizMys7XXMefvfAn4vqTPwEnA2hVC6TdIY4FVgVOp7L3AcsBhYl/qamVmZ5BoQETEfqM5YNDyjbwDn5lmPmZmVzr+kNjOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMTQaEpO0kbZWm95J0kqRO+ZdmZmblVMoI4mGgi6SeFJ4AdzZwQ55FmZlZ+ZUSEIqIdcDngV9ExCnAPvmWZWZm5VZSQEgaCpwB3JPa8r4LrJmZlVkpAfFt4CLgzoh4VtIewEP5lmVmZuXW5EggIh6mcB6ibv4l4Pw8izIzs/JrMiAk7QV8D+hb3D8ijs6vLDMzK7dSziXcDlwLXAe8n285ZmZWKUoJiA0RMWFzNi5pCbCGQrBsiIhqSd2AWymMSJYAX4yIlZIE/JzCY0fXAV+JiLmbs18zM2u5Uk5S3yXpHEk9JHWrezVjH5+OiEERUffo0XHAgxHRn8LvKsal9hFA//QaC2xWKJmZWesoZQQxOr1fWNQWwB6buc+RwFFpejIwC/hBap+Snk39mKSuknpExOubuR8zM2uBUq5i6teC7QcwQ1IAv46IicCudV/6EfG6pF1S357A0qJ1a1JbvYCQNJbCCIPevXu3oDQzM2tMKVcxdQK+CRyRmmZR+LJ/r4TtHxYRy1IIPCDp+cZ2ldEWGzUUQmYiQHV19UbLzcysdZRyDmICcDDwq/Q6mBLPD0TEsvS+HLgTOBR4U1IPgPS+PHWvAXYvWr0XsKyU/ZiZWesrJSAOiYjRETEzvc4GDmlqpXQX2B3qpoHPAs8A0/nwvMZoYFqang6cpYIhwGqffzAzK59STlK/L2nPiHgRIN1qo5TfQ+wK3Fm4epWOwE0Rcb+kJ4HbJI0BXgVGpf73UrjEdTGFy1zPbtYnMTOzVlVKQFwIPCTpJQrnCfpQwpd3uiXHgRntK4DhGe0BnFtCPWZm1gZKuYrpQUn9gQEUAuL5iHgn98rMzKysNhkQko6OiJmSPt9g0Z6SiIipOddmZmZl1NgI4khgJnBixrIAHBBmZh9hmwyIiLgkTf5XRLxcvExSS348Z2ZmW4BSLnP9Q0bbHa1diJmZVZbGzkHsDewL7NjgPMTHgC55F2ZmZuXV2DmIAcAJQFfqn4dYA3wtz6LMzKz8GjsHMQ2YJmloRPy1DWsyM7MKUMo5iG9I6lo3I2knSdfnWJOZmVWAUgLigIhYVTcTESuBg/IryczMKkEpAbGVpJ3qZtLT5Eq5RYeZmW
3BSvmi/ynwqKS6S1tHAZfnV5KZmVWCUu7FNEXSbOBoCvdi+nxEPJd7ZWZmVlaN/Q7iYxHxVjqk9AZwU9GybhHxj7Yo0MzMyqOxEcRNFH4HMYf6j/5Umt8jx7rMzKzMGvsdxAnp3fddMjNrhxo7xDS4sRUjYm7rl2NmZpWisUNMP03vXYBq4CkKh5cOAB4HhpWyA0kdgNnAaxFxQroT7C1AN2Au8OWIeFfS1sAU4GBgBXBqRCxp9icyM7NWscnfQUTEpyPi08ArwOCIqI6Igyn8SG5xM/bxbWBB0fyPgasjoj+wEhiT2scAKyPiE8DVqZ+ZmZVJKT+U2zsi/lY3ExHPAINK2bikXsDxwHVpXhQul637TcVk4OQ0PTLNk5YPT/3NzKwMSgmIBZKuk3SUpCMl/Yb6I4LG/Az4PvCvNN8dWBURG9J8DdAzTfcElgKk5atT/3okjZU0W9Ls2traEsswM7PmKiUgzgaepXCo6DvAc6mtUZJOAJZHxJzi5oyuUcKyDxsiJqbDXdVVVVVNlWFmZpuplF9Sr5d0LXBvRCxsxrYPA06SdByFE90fozCi6CqpYxol9AKWpf41wO5AjaSOwI6Af4xnZlYmTY4gJJ0EzAfuT/ODJE1var2IuCgiekVEX+A0YGZEnAE8BHwhdRsNTEvT09M8afnMiNhoBGFmZm2jlENMlwCHAqsAImI+0LcF+/wBcIGkxRTOMUxK7ZOA7qn9AmBcC/ZhZmYtVMrdXDdExOqWXFAUEbOAWWn6JQqB07DPegp3ijUzswpQSkA8I+lLQAdJ/YHzgUfzLcvMzMqtlENM3wL2Bd6hcAO/1RSuZjIzs4+wRkcQ6TYZl0bEhcD/bpuSzMysEjQ6goiI9yncG8nMzNqZUs5BzEuXtd4O/LOuMSKm5laVmZmVXSkB0Y3C3VWPLmoLwAFhZvYRVkpAXBgRf8+9EjMzqyibPAch6URJtcDTkmokfaoN6zIzszJr7CT15cDhEbEb8L+AH7VNSWZmVgkaC4gNEfE8QEQ8DuzQNiWZmVklaOwcxC6SLtjUfERclV9ZZmZWbo0FxG+oP2poOG9mZh9hmwyIiLi0LQsxM7PKUsq9mMzMrB1yQJiZWSYHhJmZZSrlkaP/p2h663zLMTOzStHYL6m/L2koHz4/GuCvpW5YUhdJT0h6StKzki5N7f0kPS5pkaRbJXVO7Vun+cVped/N+0hmZtYaGhtBLKTwCNA9JP1Z0kQKz4weUOK23wGOjogDgUHA5yQNAX4MXB0R/YGVwJjUfwywMiI+AVyd+pmZWZk0FhArgYuBxcBRwDWpfZykJh85GgVr02yn9AoKd4W9I7VPBk5O0yPTPGn5cLXkQdhmZtYijQXE54B7gD2Bq4BDgX9GxNkRUdKN+yR1kDQfWA48ALwIrIqIDalLDdAzTfcElgKk5auB7hnbHCtptqTZtbW1pZRhZmabYZMBEREXR8RwYAlwI4Uf1VVJ+ouku0rZeES8HxGDgF4UAmZgVrf0njVaiI0aIiZGRHVEVFdVVZVShpmZbYZSngfxx4h4EnhS0jcjYpiknZuzk4hYJWkWMAToKqljGiX0ApalbjXA7kCNpI7AjsA/mrMfMzNrPU1e5hoR3y+a/Upqa/IBQpKqJHVN09sAxwALgIf48Mqo0cC0ND09zZOWz4yIjUYQZmbWNkoZQXwgIp5qRvcewGRJHSgE0W0Rcbek54BbJF0GzAMmpf6TgN9JWkxh5HBac2ozM7PW1ayAaI6IeBo4KKP9JQrnIxq2r6dwWa2ZmVUA32rDzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCxTbj+Usy3M+B3LXUFlGb+63BWYlZ1HEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmXILCEm7S3pI0gJJz0r6dmrvJukBSYvS+06pXZKukbRY0tOSBudVm5mZNS3PEcQG4N8jYiAwBDhX0j7AOODBiOgPPJjmAUYA/dNrLDAhx9rMzKwJeT6T+nXg9TS9RtICoCcwEjgqdZsMzAJ+kNqnREQAj0nqKqlH2k6r6zvunjw2u8Va0qXcFZhZpWmTcxCS+gIHAY8Du9Z96af3XVK3nsDSotVqUlvDbY2VNFvS7Nra2jzLNjNr13IPCEnbA38AvhMRbzXWNaMtNmqImBgR1RFRXVVV1VplmplZA7kGhKROFMLh9xExNTW/KalHWt4DWJ7aa4Ddi1bvBSzLsz4zM9u0PK9iEjAJWBARVxUtmg6MTtOjgWlF7Welq5mGAKvzOv9gZmZNy/N5EIcBXwb+Jml+arsYuAK4TdIY4FVgVFp2L3AcsBhYB5ydY21mZtaEPK9i+gvZ5xUAhmf0D+DcvOoxM7Pm8S+pzcwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLFOez6S+XtJySc8UtXWT9ICkRel9p9QuSddIWizpaUmD86rLzMxKk+cI4gbgcw3axgEPRkR/4ME0DzAC6J9eY4EJOdZlZmYlyC0gIuJh4B8NmkcCk9P0ZODkovYpUfAY0FVSj7xqMzOzprX1OYhdI+J1gPS+S2rvCSwt6leT2jYiaayk2ZJm19bW5lqsmVl7ViknqZXRFlkdI2JiRFRHRHVVVVXOZZmZtV9tHRBv1h06Su/LU3sNsHtRv17AsjauzczMirR1QEwHRqfp0cC0ovaz0tVMQ4DVdYeizMysPDrmtWFJNwNHATtLqgEuAa4AbpM0BngVGJW63wscBywG1gFn51WXmZmVJreAiIjTN7FoeEbfAM7NqxYzM2u+SjlJbWZmFcYBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpapogJC0uckLZS0WNK4ctdjZtaeVUxASOoA/BIYAewDnC5pn/JWZWbWflVMQACHAosj4qWIeBe4BRhZ5prMzNqtjuUuoEhPYGnRfA3wyYadJI0FxqbZtZIWtkFtH3mCnYG/l7uOinGpyl2BNeC/0QZa9jfap5ROlRQQWZ82NmqImAhMzL+c9kXS7IioLncdZpviv9G2V0mHmGqA3YvmewHLylSLmVm7V0kB8STQX1I/SZ2B04DpZa7JzKzdqphDTBGxQdJ5wB+BDsD1EfFsmctqT3zYziqd/0bbmCI2OsxvZmZWUYeYzMysgjggzMwskwOinZN0vaTlkp4pdy1mWSTtLukhSQskPSvp2+Wuqb3wOYh2TtIRwFpgSkTsV+56zBqS1APoERFzJe0AzAFOjojnylzaR55HEO1cRDwM/KPcdZhtSkS8HhFz0/QaYAGFOy9YzhwQZrbFkNQXOAh4vLyVtA8OCDPbIkjaHvgD8J2IeKvc9bQHDggzq3iSOlEIh99HxNRy1
9NeOCDMrKJJEjAJWBARV5W7nvbEAdHOSboZ+CswQFKNpDHlrsmsgcOALwNHS5qfXseVu6j2wJe5mplZJo8gzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwqwEktY2o+94Sd/La/tmbcUBYWZmmRwQZptJ0omSHpc0T9KfJO1atPhASTMlLZL0taJ1LpT0pKSnJV1ahrLNSuaAMNt8fwGGRMRBwC3A94uWHQAcDwwF/lPSbpI+C/QHDgUGAQen53GYVaSO5S7AbAvWC7g1PdCmM/By0bJpEfE28LakhyiEwjDgs8C81Gd7CoHxcNuVbFY6B4TZ5vsFcFVETJd0FDC+aFnDe9gEIOBHEfHrtinPrGV8iMls8+0IvJamRzdYNlJSF0ndgaOAJ4E/Al9NzzVAUk9Ju7RVsWbN5RGEWWm2lVRTNH8VhRHD7ZJeAx4D+hUtfwK4B+gN/HdELAOWSRoI/LVwB2vWAmcCy/Mv36z5fDdXMzPL5ENMZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaW6f8DhXFR1eDsTAEAAAAASUVORK5CYII=\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH5dJREFUeJzt3XuYVnW99/H3Rw7iGUE0BAU0RDwijQaJh8QyNEV7Yqtlso2itpqZqaHPsx9xbzV7NDU74GaHCZmaGT7gMUz0Ms8OB01FZDwyojIiIASo2Hf/cf9Gb4bFzD3MrLlvnM/ruu7rXuu3fmut73CN8/G3jooIzMzMGtqs3AWYmVllckCYmVkmB4SZmWVyQJiZWSYHhJmZZXJAmJlZJgeEbdIkPSjpO229blr/EEnzN3b9jO3dI2l0mv5XSQ+34ra/KWlGa23P2gcHhFUESa9KOrLcddSTNF7Sh5JWpM+Lkn4lqWd9n4j4W0QMKHFbNzbVLyJGRMTkVqi9r6SQ1LFo23+IiC+3dNvWvjggzDbsjxGxDdANOAH4DDCrOCRagwr836JVHP9SWkWTtL2kOyXVSVqapns36La7pCclLZc0TVK3ovWHSHpU0jJJT0s6vLk1RMSHEfEccCJQB/w4bftwSbVF+/qJpDfSiGO+pOGSvgJcCJwoaaWkp1PfByVdKukRYBWwW8YhL0n6Zfq5XpA0vGjBOiOuBqOUh9L3srTPoQ0PWUn6gqSn0rafkvSFomUPSvpPSY+kn2WGpB2a++9mmz4HhFW6zYDfAX2AXYHVwK8a9DkV+DawM7AWuBZAUi/gLuASCqOAc4E/S+qxMYVExEfANOCQhsskDQDOBA5Mo46jgFcj4l7gMgqjka0jYv+i1b4FjAW2AV7L2OXngZeBHYCLgKnF4deIQ9N317TPxxrU2o3Cv8u1QHfgKuAuSd2Lun0DOA3YEehM4d/O2hkHhFW0iFgSEX+OiFURsQK4FDisQbffR8SzEfEP4N+Bf5HUATgFuDsi7o6If0bEfUA1cHQLSlpEIWwa+gjYHNhLUqeIeDUiXmpiWzdExHMRsTYiPsxYvhi4Jo1g/gjMB45pQe31jgEWRMTv075vBl4Aji3q87uIeDEiVgO3AoNaYb+2iXFAWEWTtKWk/5L0mqT3KBw+6ZoCoN7CounXgE4U/q+7DzAqHV5aJmkZMAxoyTmEXsC7DRsjogY4GxgPLJZ0i6Sdm9jWwiaWvxHrPk3zNQqjpJbamfVHLK9R+NnqvVU0vQrYuhX2a5sYB4RVuh8DA4DPR8S2fHL4REV9dima3hX4EHiHwh/g30dE16LPVhFx+cYUkk4kHwv8LWt5RNwUEcMoBFMAP6tftIFNNvUo5V6Sin/OXSmMYAD+AWxZtOwzzdjuolRjsV2BN5pYz9oZB4RVkk6SuhR9OlI4Pr+awgnXbhSOxTd0iqS9JG0J/AdwWzpfcCNwrKSjJHVI2zw84yR3oyR1kjQQuJnCH+KrMvoMkHSEpM2BNanmj9Lit4G+G3Gl0o7AWWn/o4CBwN1p2VzgpLSsCvh60Xp1wD+B3Taw3buBPSR9Q1JHSScCewF3NrM++5RzQFgluZvCH9b6z3jgGmALCiOCx4F7M9b7PXADhcMiXYCzACJiITCSwlVEdRRGFOdR+u/9iZJWAsuA6cAS4HMRsSij7+bA5anOtyj8cb8wLftT+l4iaXaJ+wZ4Auiftnkp8PWIWJKW/TuwO7AUuBi4qX6liFiV+j+SDq0NKd5o2sZXKYzOlgDnA1+NiHeaUZu1A/ILg8zMLItHEGZmlskBYWZmmRwQZmaWyQFhZmaZOjbdpXLtsMMO0bdv33KXYWa2SZk1a9Y7EdHkI2c26YDo27cv1dXV5S7DzGyTIinr2V/r8SEmMzPL5IAwM7NMDggzM8u0SZ+DMLNPpw8//JDa2lrWrFlT7lI2aV26dKF379506tRpo9Z3QJhZxamtrWWbbbahb9++rPtAWytVRLBkyRJqa2vp16/fRm3Dh5jMrOKsWbOG7t27OxxaQBLdu3dv0Sgs14CQ9CNJz0l6VtLN6XHL/SQ9IWmBpD9K6pz6bp7ma9LyvnnWZmaVzeHQci39N8wtINL7gM8CqiJiH6ADcBKFl6hcHRH9KTyqeExaZQywNCI+C1zNJy9bMTOzMsj7HERHYAtJH1J4+9WbwBEUXogOMJnCM/8nUHhu//jUfhvwK0kKP4/crN3rO+6uVt3eq5c3/mrvDh06sO+++7J27VoGDhzI5MmT2XLLLRtdZ0MefPBBrrzySu68806mT5/O888/z7hx4zL7Llu2jJtuuonTTz+9WfsYP348W2+9Neeee+5G1bghuQVERLwh6UrgdQovf5kBzAKWRcTa1K2WT96D24v0jt6IWCtpOdCdwstSPiZpLDAWYNddd82rfLOya+0/ipuS/z6uJx/WLivb/rfYYgvmzp0LwDe/+U2uu+46zjnnnI+XRwQRwWabNe8gzHHHHcdxxx23weXLli3jN7/5TbMDIi95HmLansKooB+Fl6RvBYzI6Fo/Qsg6WLbe6CEiJkZEVURU9ejR5KNEzMxa5JBDDqGmpoZXX32VgQMHcvrppzN48GAWLlzIjBkzGDp0KIMHD2bUqFGsXLkSgHvvvZc999yTYcOGMXXq1I+3dcMNN3DmmWcC8Pbbb3PCCSew//77s//++/Poo48ybtw4XnrpJQYNGsR5550HwBVXXMGBBx7Ifvvtx0UXffLG3UsvvZQBAwZw5JFHMn/+/Fx+9jxPUh8JvBIRdRHxITAV+ALQNb1rGKA3n7yEvZb08vm0fDvg3RzrMzNr1Nq1a7nnnnvYd999AZg/fz6nnnoqc+bMYauttuKSSy7hr3/9K7Nnz6aqqoqrrrqKNWvW8N3vfpc77riDv/3tb7z11luZ2z7rrLM47LDDePr
pp5k9ezZ77703l19+Obvvvjtz587liiuuYMaMGSxYsIAnn3ySuXPnMmvWLB566CFmzZrFLbfcwpw5c5g6dSpPPfVULj9/nucgXgeGpBfJrwaGA9XAAxResH4LMBqYlvpPT/OPpeUzff7BzMph9erVDBo0CCiMIMaMGcOiRYvo06cPQ4YUXvH9+OOP8/zzz3PwwQcD8MEHHzB06FBeeOEF+vXrR//+/QE45ZRTmDhx4nr7mDlzJlOmTAEK5zy22247li5duk6fGTNmMGPGDA444AAAVq5cyYIFC1ixYgUnnHDCx+dFGjts1RJ5noN4QtJtwGxgLTAHmAjcBdwi6ZLUNimtMgn4vaQaCiOHk/KqzcysMcXnIIpttdVWH09HBF/60pe4+eab1+kzd+7cVrtENyK44IIL+N73vrdO+zXXXNMmlwHneh9ERFwUEXtGxD4R8a2IeD8iXo6IgyLisxExKiLeT33XpPnPpuUv51mbmVlLDBkyhEceeYSamhoAVq1axYsvvsiee+7JK6+8wksvvQSwXoDUGz58OBMmTADgo48+4r333mObbbZhxYoVH/c56qijuP766z8+t/HGG2+wePFiDj30UG6//XZWr17NihUruOOOO3L5Gf2oDTOreNPPPJj9enctdxnr6NGjBzfccAMnn3wy77//PgCXXHIJe+yxBxMnTuSYY45hhx12YNiwYTz77LPrrf+LX/yCsWPHMmnSJDp06MCECRMYOnQoBx98MPvssw8jRozgiiuuYN68eQwdOhSArbfemhtvvJHBgwdz4oknMmjQIPr06cMhhxySy8+oTfkwf1VVVfiFQfZp1d4vc91p193Waau0gNhUzJs3j4EDB67TJmlWRFQ1ta6fxWRmZpkcEGZmlskBYWZmmRwQZmaWyVcxmdmmYdGccldQWXY+IPddeARhZmaZPIIws4q332/7tO4Gxz7YZJe365bwo/E/5/HZf2f77bahc6dOnH/6aE4YcURm/wcfrebK66Zw55Rr11vW9/PHUH3PjezQbfsWFt62HBBmZg1EBMd/+xxGjzqWm359GQCv1S5i+oyHylxZ23JAmJk1MPPhJ+ncuRPfP/XrH7f16b0zP/j2SaxZ8z7/dsFlVD8zj44dOnDVRefwxYMPXGf9Je8u4+QzLqRuyVIOGrQ3m+oNyT4HYWbWwHMvvszgffbMXPbrG24F4O/338rNv7mM0WdfxJo176/T5+KrJzLsoEHMmXEzx335MF5/I/uR35XOIwgzsyacceFPefjJuXTu3InePXfiB6edCMCen+1Hn96f4cWXX1un/0OPz2bqb68E4JgjD2H7rtu2ec2twSMIM7MG9t5jN2Y/+8LH87++7ALuv/U66pYsLflwUVs8jjtvDggzswaOGHYQa97/gAmT//Rx26rVawA49POD+cPt9wDw4kuv8fobbzFg977rrH/okMH8YWqhzz0zH2HpsvfapvBW5kNMZlbxnvnOa+y32Stttj9J/P9JP+dH43/O/5swmR7dt2erLbbgZxeexcijDuf74y5j3+H/QscOHbjh6ovZfPPO66x/0Y/GcvIZFzL4qG9w2JDB7NrrM21We2vy477NKpQf993gcd9tGBCbhBLvpK7Ix31LGiBpbtHnPUlnS+om6T5JC9L39qm/JF0rqUbSM5IG51WbmZk1LbeAiIj5ETEoIgYBnwNWAbcD44D7I6I/cH+aBxgB9E+fscCEvGozM7OmtdVJ6uHASxHxGjASmJzaJwPHp+mRwJQoeBzoKqlnG9VnZhUkiE325rJK0tJ/w7YKiJOA+jd37xQRbwKk7x1Tey9gYdE6taltHZLGSqqWVF1XV5djyWZWLq8t+5C1q95zSLRARLBkyRK6dOmy0dvI/SomSZ2B44ALmuqa0bbeb0dETAQmQuEkdYsLNLOK88snlvIDoE/Xd1D60zBP/h/CdSyf12SXLl260Lt3743eRVtc5joCmB0Rb6f5tyX1jIg30yGkxam9FtilaL3ewKI2qM/MKsx77/+TSx9ask7bq12+UaZqKtT45bnvoi0OMZ3MJ4eXAKYDo9P0aGBaUfup6WqmIcDy+kNRZmbW9nIdQUjaEvgS8L2i5suBWyWNAV4HRqX2u4GjgRoKVzydlmdtZmbWuFwDIiJWAd0btC2hcFVTw74BnJFnPWZmVjo/i8nMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwy5RoQkrpKuk3SC5LmSRoqqZuk+yQtSN/bp76SdK2kGknPSBqcZ21mZta4vEcQvwDujYg9gf2BecA44P6I6A/cn+YBRgD902csMCHn2szMrBG5BYSkbYFDgUkAEfFBRCwDRgKTU7fJwPFpeiQwJQoeB7pK6plXfWZm1rg8RxC7AXXA7yTNkfRbSVsBO0XEmwDpe8fUvxewsGj92tS2DkljJVVLqq6rq8uxfDOz9i3PgOgIDAYmRMQBwD/45HBSFmW0xXoNERMjoioiqnr06NE6lZqZ2XryDIhaoDYinkjzt1EIjLfrDx2l78VF/XcpWr83sCjH+szMrBG5BUREvAUslDQgNQ0HngemA6NT22hgWpqeDpyarmYaAiyvPxRlZmZtr2PO2/8B8AdJnYGXgdMohNKtksYArwOjUt+7gaOBGmBV6mtmZmWSa0BExFygKmPR8Iy+AZyRZz1mZlY630ltZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpapyYCQtJWkzdL0HpKOk9Qp/9LMzKycShlBPAR0kdSLwgt+TgNuyLMoMzMrv1ICQhGxCvga8MuIOAHYK9+yzMys3EoKCElDgW8Cd6W2vB/yZ2ZmZVZKQPwQuAC4PSKek7Qb8EC+ZZmZWbk1ORKIiIconIeon38ZOCvPoszMrPyaDAhJewDnAn2L+0fEEfmVZWZm5VbKuYQ/AdcBvwU+yrccMzOrFKUExNqImJB7JWZmVlFKOUl9h6TTJfWU1K3+U8rGJb0q6e+S5kqqTm3dJN0naUH63j61S9K1kmokPSNpcAt+LjMza6FSAmI0cB7wKDArfaqbsY8vRsSgiKh/9eg44P6I6E/hxrtxqX0E0D99xgIetZiZlVEpVzH1a+V9jgQOT9OTgQeBn6T2Kend1I9L6iqpZ0S82cr7NzOzEpTyLKZOks6SdFv6nNmMZzEFMEPSLEljU9tO9X/00/eOqb0XsLBo3drU1rCesZKqJVXX1dWVWIaZmTVXKSepJwCdgN+k+W+ltu+UsO7BEbFI0o7AfZJeaKSvMtpivYaIicBEgKqqqvWWm5lZ6yglIA6MiP2L5mdKerqUjUfEovS9WNLtwEHA2/WHjiT1BBan7rXALkWr9wYWlbIfMzNrfaWcpP5I0u71M+lRG03eD5EeE75N/TTwZeBZYDqFE9+k72lpejpwarqaaQiw3OcfzMzKp5QRxHnAA5JepnAYqA+FR343ZSfgdk
[... base64-encoded PNG plot data omitted ...]\n",
      "text/plain": [ "" ] },
-     "metadata": {},
+     "metadata": {
+      "needs_background": "light"
+     },
     "output_type": "display_data"
    }
   ],
@@ -628,17 +641,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 15,
+   "execution_count": 24,
    "metadata": {},
    "outputs": [
     {
      "data": {
-      "image/png": "[... base64-encoded PNG plot data omitted (matplotlib 2.2.2 output) ...]\n",
+      "image/png": "[... base64-encoded PNG plot data omitted (matplotlib 2.2.3 output) ...]\n",
      "text/plain": [ "
" ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], @@ -660,7 +675,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -668,8 +683,8 @@ "output_type": "stream", "text": [ " y=1 y=2 \n", - " l=1 169 77 \n", - " l=2 34 720 \n" + " l=1 166 80 \n", + " l=2 32 722 \n" ] } ], @@ -711,7 +726,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 26, "metadata": { "scrolled": false }, @@ -756,21 +771,80 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 27, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 204.03it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "Saving model at iteration 0 with best score 0.981\n", - "[E:0]\tTrain Loss: 0.463\tDev score: 0.981\n", - "[E:1]\tTrain Loss: 0.421\tDev score: 0.919\n", - "[E:2]\tTrain Loss: 0.415\tDev score: 0.938\n", - "[E:3]\tTrain Loss: 0.413\tDev score: 0.932\n", - "[E:4]\tTrain Loss: 0.411\tDev score: 0.918\n", + "[E:0]\tTrain Loss: 0.455\tDev score: 0.981\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 275.81it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:1]\tTrain Loss: 0.412\tDev score: 0.921\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 243.93it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:2]\tTrain Loss: 0.406\tDev score: 0.942\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 241.64it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:3]\tTrain Loss: 0.404\tDev score: 0.920\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 32/32 [00:00<00:00, 218.43it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:4]\tTrain Loss: 0.402\tDev score: 0.927\n", "Restoring best model from iteration 0 with score 0.981\n", "Finished Training\n", "Confusion Matrix (Dev)\n", @@ -781,7 +855,7 @@ } ], "source": [ - "end_model.train(Xs[0], Y_train_ps, Xs[1], Ys[1], l2=0.1, batch_size=256, \n", + "end_model.train((Xs[0], Y_train_ps), dev_data=(Xs[1], Ys[1]), l2=0.1, batch_size=256, \n", " n_epochs=5, print_every=1, validation_metric='f1')" ] }, @@ -801,7 +875,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 28, "metadata": { "scrolled": false }, @@ -824,12 +898,12 @@ ], "source": [ "print(\"Label Model:\")\n", - "score = label_model.score(Ls[2], Ys[2], metric=['precision', 'recall', 'f1'])\n", + "score = label_model.score((Ls[2], Ys[2]), metric=['precision', 'recall', 'f1'])\n", "\n", "print()\n", "\n", "print(\"End Model:\")\n", - "score = end_model.score(Xs[2], Ys[2], metric=['precision', 'recall', 'f1'])" + "score = end_model.score((Xs[2], Ys[2]), metric=['precision', 'recall', 'f1'])" ] }, { diff --git a/tutorials/Multitask.ipynb b/tutorials/Multitask.ipynb index c754197d..a4756026 100644 --- a/tutorials/Multitask.ipynb +++ b/tutorials/Multitask.ipynb @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -99,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 2, + 
"execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +123,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -140,7 +140,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -149,12 +149,12 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 4.034\n", - "[E:20]\tTrain Loss: 0.472\n", - "[E:40]\tTrain Loss: 0.111\n", + "[E:0]\tTrain Loss: 4.396\n", + "[E:20]\tTrain Loss: 0.614\n", + "[E:40]\tTrain Loss: 0.174\n", "[E:60]\tTrain Loss: 0.050\n", "[E:80]\tTrain Loss: 0.034\n", - "[E:100]\tTrain Loss: 0.028\n", + "[E:100]\tTrain Loss: 0.029\n", "[E:120]\tTrain Loss: 0.027\n", "[E:140]\tTrain Loss: 0.026\n", "[E:160]\tTrain Loss: 0.025\n", @@ -177,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 10, "metadata": { "scrolled": false }, @@ -186,27 +186,27 @@ "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.910\n" + "Accuracy: 0.900\n" ] }, { "data": { "text/plain": [ - "0.91" + "0.9" ] }, - "execution_count": 16, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "label_model.score(Ls[1], Ys[1])" + "label_model.score((Ls[1], Ys[1]))" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -243,7 +243,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -287,28 +287,88 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 13, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 153.98it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving model at iteration 0 with best score 0.873\n", + "[E:0]\tTrain Loss: 2.277\tDev score: 0.873\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 174.00it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Saving model at iteration 1 with best score 0.913\n", + "[E:1]\tTrain Loss: 1.324\tDev score: 0.913\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 245.52it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:2]\tTrain Loss: 1.033\tDev score: 0.913\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 243.85it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[E:3]\tTrain Loss: 0.899\tDev score: 0.900\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 25/25 [00:00<00:00, 229.92it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 0 with best score 0.940\n", - "[E:0]\tTrain Loss: 2.264\tDev score: 0.940\n", - "[E:1]\tTrain Loss: 1.352\tDev score: 0.917\n", - "[E:2]\tTrain Loss: 1.069\tDev score: 0.900\n", - "[E:3]\tTrain Loss: 0.962\tDev score: 0.853\n", - "[E:4]\tTrain Loss: 0.909\tDev score: 0.890\n", - "Restoring best model from iteration 0 with score 0.940\n", + "[E:4]\tTrain Loss: 0.854\tDev score: 0.897\n", + "Restoring best model from iteration 1 with score 0.913\n", "Finished Training\n" ] } ], "source": [ - "end_model.train(Xs[0], 
Y_train_ps, Xs[1], Ys[1], n_epochs=5, seed=123)" + "end_model.train((Xs[0], Y_train_ps), dev_data=(Xs[1], Ys[1]), n_epochs=5, seed=123)" ] }, { @@ -327,7 +387,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 14, "metadata": { "scrolled": true }, @@ -337,21 +397,21 @@ "output_type": "stream", "text": [ "Label Model:\n", - "Accuracy: 0.880\n", + "Accuracy: 0.877\n", "\n", "End Model:\n", - "Accuracy: 0.917\n" + "Accuracy: 0.907\n" ] } ], "source": [ "print(\"Label Model:\")\n", - "score = label_model.score(Ls[2], Ys[2])\n", + "score = label_model.score((Ls[2], Ys[2]))\n", "\n", "print()\n", "\n", "print(\"End Model:\")\n", - "score = end_model.score(Xs[2], Ys[2])" + "score = end_model.score((Xs[2], Ys[2]))" ] }, { @@ -363,21 +423,21 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy (t=0): 0.940\n", + "Accuracy (t=0): 0.930\n", "Accuracy (t=1): 0.910\n", - "Accuracy (t=2): 0.900\n" + "Accuracy (t=2): 0.880\n" ] } ], "source": [ - "scores = end_model.score(Xs[2], Ys[2], reduce=None)" + "scores = end_model.score((Xs[2], Ys[2]), reduce=None)" ] }, { @@ -389,45 +449,38 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1,\n", - " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 2, 2,\n", - " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,\n", + " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2,\n", + " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2,\n", " 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", - " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 1,\n", - " 3, 3, 3, 3, 2, 3, 1, 2, 2, 3, 1, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", - " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 2, 1, 3, 1, 1, 1, 1, 3, 3,\n", - " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 2, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", - " 3, 1, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", - " array([1, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 2, 1, 2, 2, 1, 1, 3, 2, 3, 3,\n", - " 2, 1, 2, 2, 3, 2, 3, 3, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 1, 3,\n", + " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 1,\n", + " 3, 3, 3, 3, 2, 3, 1, 2, 3, 3, 2, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", + " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 1, 1, 3, 3, 3, 3,\n", + " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 3, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", + " 3, 3, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", + " array([1, 3, 3, 1, 3, 3, 3, 3, 3, 2, 1, 1, 2, 1, 2, 2, 1, 1, 3, 2, 3, 3,\n", + " 2, 1, 2, 1, 3, 2, 3, 3, 3, 2, 3, 3, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3,\n", " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 3, 3, 3, 1, 1, 1,\n", - " 3, 1, 3, 3, 3, 2, 3, 2, 3, 2, 2, 3, 3, 1, 3, 3, 3, 1, 2, 1, 2, 1,\n", - " 2, 3, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" + " 3, 1, 3, 3, 2, 2, 3, 2, 3, 2, 2, 3, 3, 1, 3, 3, 3, 2, 2, 2, 2, 1,\n", + " 2, 2, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" ] }, - "execution_count": 22, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "Y_p = end_model.predict(Xs[2], Ys[2])\n", + "Y_p = end_model.predict(Xs[2])\n", "Y_p" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], 
- "source": [] } ], "metadata": { From 9996c91c6b299208603a565f7d0734dc8071e0f4 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 15:20:26 -0700 Subject: [PATCH 17/35] - Got rid of _evaluate methods, transform everything to a DataLoader - Other small fixes --- metal/classifier.py | 179 ++++++++++++++----------------- metal/end_model/end_model.py | 41 +++---- metal/label_model/lm_defaults.py | 2 + metal/multitask/mt_end_model.py | 12 +-- 4 files changed, 100 insertions(+), 134 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 26d7ab13..f4d7aaa2 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -5,7 +5,7 @@ import torch import torch.nn as nn import torch.optim as optim -from torch.utils.data.dataloader import DataLoader +from torch.utils.data import DataLoader, Dataset, TensorDataset from tqdm import tqdm from metal.analysis import confusion_matrix @@ -112,21 +112,25 @@ def train(self, *args, **kwargs): """ raise NotImplementedError - def _train(self, train_loader, loss_fn, dev_loader=None): + def _train(self, train_data, loss_fn, dev_data=None): """The internal training routine called by train() after initial setup Args: - train_loader: a torch DataLoader of X (data) and Y (labels) for - the train split + train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of + X (data) and Y (labels) for the train split loss_fn: the loss function to minimize (maps *data -> loss) - dev_loader: a torch DataLoader of X (data) and Y (labels) for - the dev split + dev_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of + X (data) and Y (labels) for the dev split - If dev_loader is not provided, then no checkpointing or + If dev_data is not provided, then no checkpointing or evaluation on the dev set will occur. 
""" train_config = self.config["train_config"] - evaluate_dev = dev_loader is not None + evaluate_dev = dev_data is not None + + # Convert data to DataLoaders + train_loader = self._create_data_loader(train_data) + dev_loader = self._create_data_loader(dev_data) # Set the optimizer optimizer_config = train_config["optimizer_config"] @@ -146,24 +150,22 @@ def _train(self, train_loader, loss_fn, dev_loader=None): # Moving model to GPU if train_config["use_cuda"]: - if self.config["verbose"]: print("Using GPU...") - self.cuda() # Train the model for epoch in range(train_config["n_epochs"]): epoch_loss = 0.0 - for batch, data in tqdm( + for batch_num, data in tqdm( enumerate(train_loader), total=len(train_loader), disable=train_config["disable_prog_bar"], ): - # moving data to GPU + # Moving data to GPU if train_config["use_cuda"]: - data = [d.cuda() for d in data] + data = data.cuda() # Zero the parameter gradients optimizer.zero_grad() @@ -247,14 +249,40 @@ def _train(self, train_loader, loss_fn, dev_loader=None): if self.config["verbose"]: print("Finished Training") - if evaluate_dev: - # Currently use default random break ties in evaluate - Y_p_dev, Y_dev = self.evaluate(dev_loader) + self.score( + dev_loader, + metric=["accuracy"], + verbose=True, + print_confusion_matrix=True, + ) + + def _create_dataset(self, data): + """Converts input data to the appropriate Dataset""" + return TensorDataset(*data) - if not self.multitask: - print("Confusion Matrix (Dev)") - confusion_matrix(Y_p_dev, Y_dev, pretty_print=True) + def _create_data_loader(self, data, **kwargs): + """Converts input data into a DataLoader""" + if data is None: + return None + + # Set DataLoader config + # NOTE: Not applicable if data is already a DataLoader + config = { + **self.config["train_config"]["data_loader_config"], + **kwargs, + "pin_memory": self.config["train_config"]["use_cuda"], + } + + # Return data as DataLoader + if isinstance(data, (tuple, list)): + return DataLoader(self._create_dataset(*data), **config) + elif isinstance(data, Dataset): + return DataLoader(data, **config) + elif isinstance(data, DataLoader): + return data + else: + raise ValueError("Input data type not recognized.") def _set_optimizer(self, optimizer_config): opt = optimizer_config["optimizer"] @@ -292,93 +320,19 @@ def _set_scheduler(self, scheduler_config, optimizer): ) return lr_scheduler - def _batch_evaluate(self, loader, break_ties="random", **kwargs): - """Evaluates the model using minibatches - - Args: - loader: Pytorch DataLoader supplying (X,Y): - X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels - in {1,...,k}; can be None for cases with no ground truth - - Returns: - Y_p: an np.ndarray of predictions - Y: an np.ndarray of ground truth labels - """ - Y = [] - Y_p = [] - for batch, data in enumerate(loader): - X_batch, Y_batch = data - - if self.config["train_config"]["use_cuda"]: - X_batch = X_batch.cuda() - - Y_batch = self._to_numpy(Y_batch) - - if Y_batch.ndim > 1: - Y_batch = self._break_ties(Y_batch, break_ties) - - Y.append(Y_batch) - Y_p.append( - self._to_numpy( - self.predict(X_batch, break_ties=break_ties, **kwargs) - ) - ) - - Y = np.hstack(Y) - Y_p = np.hstack(Y_p) - - return Y_p, Y - - def evaluate(self, data, break_ties="random", **kwargs): - """Evaluates the model - - Args: - data: either a Pytorch DataLoader or tuple supplying (X,Y): - X: The input for the predict method - Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels - in {1,...,k} - - 
Returns: - Y_p: an np.ndarray of predictions - Y: an np.ndarray of ground truth labels - """ - - if type(data) is tuple: - X, Y = data - - if self.config["train_config"]["use_cuda"]: - X = X.cuda() - - Y = self._to_numpy(Y) - - if Y.ndim > 1: - Y = self._break_ties(Y, break_ties) - - Y_p = self.predict(X, break_ties=break_ties, **kwargs) - - elif type(data) is DataLoader: - Y_p, Y = self._batch_evaluate(data, break_ties=break_ties) - - else: - raise ValueError( - "Unrecognized input data structure, use tuple or DataLoader!" - ) - - return Y_p, Y - def score( self, data, metric=["accuracy"], break_ties="random", verbose=True, + print_confusion_matrix=True, **kwargs, ): """Scores the predictive performance of the Classifier on all tasks Args: - data: either a Pytorch DataLoader or tuple supplying (X,Y): + data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): X: The input for the predict method Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in {1,...,k} @@ -387,20 +341,46 @@ def score( break_ties: How to break ties when making predictions verbose: The verbosity for just this score method; it will not update the class config. + print_confusion_matrix: Print confusion matrix Returns: scores: A (float) score """ + data_loader = self._create_data_loader(data) + Y_pred = [] + Y = [] - Y_p, Y = self.evaluate(data, break_ties=break_ties) + # Do batch evaluation by default, getting the predictions and labels + for batch_num, data in enumerate(data_loader): + Xb, Yb = data + Y.append(Yb) + + # Optionally move to GPU + if self.config["train_config"]["use_cuda"]: + Xb = Xb.cuda() + + # Append predictions and labels from DataLoader + Y_pred.append( + self._to_numpy( + self.predict(Xb, break_ties=break_ties, **kwargs) + ) + ) + Y_pred = np.hstack(Y_pred) + Y = np.hstack(Y) + + # Evaluate on the specified metrics metric_list = metric if isinstance(metric, list) else [metric] scores = [] for metric in metric_list: - score = metric_score(Y, Y_p, metric, ignore_in_gold=[0]) + score = metric_score(Y, Y_pred, metric, ignore_in_gold=[0]) scores.append(score) if verbose: print(f"{metric.capitalize()}: {score:.3f}") + # Optionally print confusion matrix + if print_confusion_matrix: + confusion_matrix(Y_pred, Y, pretty_print=True) + if isinstance(scores, list) and len(scores) == 1: return scores[0] else: @@ -416,9 +396,8 @@ def predict(self, X, break_ties="random", **kwargs): Returns: An n-dim np.ndarray of predictions in {1,...k} """ - Y_p = self._to_numpy(self.predict_proba(X, **kwargs)) - Y_ph = self._break_ties(Y_p, break_ties) - return Y_ph.astype(np.int) + Y_pred = self._to_numpy(self.predict_proba(X, **kwargs)) + return self._break_ties(Y_pred, break_ties).astype(np.int) def predict_proba(self, X, **kwargs): """Predicts soft probabilistic labels for an input X on all tasks diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 6ce58638..950693bf 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -1,7 +1,6 @@ import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader from metal.classifier import Classifier from metal.end_model.em_defaults import em_default_config @@ -159,10 +158,8 @@ def _preprocess_Y(self, Y, k): Y = hard_to_soft(Y.long(), k=k) return Y - def _make_data_loader(self, X, Y, data_loader_config): - dataset = MetalDataset(X, self._preprocess_Y(Y, self.k)) - data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) - return data_loader + def 
_create_dataset(self, *data): + return MetalDataset(*data) def _get_loss_fn(self): if hasattr(self.config, "use_cuda"): @@ -171,33 +168,23 @@ def _get_loss_fn(self): else: criteria = self.criteria loss_fn = lambda X, Y: criteria(self.forward(X), Y) - return loss_fn - def _convert_input_data(self, data): - if type(data) is tuple: - X, Y = data - Y = self._to_torch(Y, dtype=torch.FloatTensor) - loader_config = self.config["train_config"]["data_loader_config"] - loader = self._make_data_loader(X, Y, loader_config) - elif type(data) is DataLoader: - loader = data - else: - raise ValueError( - "Unrecognized input data structure, use tuple or DataLoader." - ) - return loader - def train(self, train_data, dev_data=None, **kwargs): - self.config = recursive_merge_dicts(self.config, kwargs) + # If train_data is provided as a tuple (X, Y), we can make sure Y is in + # the correct format + # NOTE: Better handling for if train_data is Dataset or DataLoader...? + if isinstance(train_data, (tuple, list)): + X, Y = train_data + Y = self._preprocess_Y( + self._to_torch(Y, dtype=torch.FloatTensor), self.k + ) + train_data = (X, Y) + # Convert input data to data loaders - train_loader = self._convert_input_data(train_data) - if dev_data is not None: - dev_loader = self._convert_input_data(dev_data) - else: - dev_loader = None + train_loader = self._create_data_loader(train_data, shuffle=True) # Initialize the model self.reset() @@ -206,7 +193,7 @@ def train(self, train_data, dev_data=None, **kwargs): loss_fn = self._get_loss_fn() # Execute training procedure - self._train(train_loader, loss_fn, dev_loader=dev_loader) + self._train(train_loader, loss_fn, dev_data=dev_data) def predict_proba(self, X): """Returns a [n, k] tensor of soft (float) predictions.""" diff --git a/metal/label_model/lm_defaults.py b/metal/label_model/lm_defaults.py index 86295c8a..384598d4 100644 --- a/metal/label_model/lm_defaults.py +++ b/metal/label_model/lm_defaults.py @@ -5,6 +5,8 @@ "show_plots": True, # TRAIN "train_config": { + # Dataloader + "data_loader_config": {"batch_size": 1000, "num_workers": 1}, # Classifier # Class balance (if learn_class_balance=False, fix to class_balance) "learn_class_balance": False, diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index 7ecc1576..c49667cf 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -4,7 +4,6 @@ import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader from metal.end_model import EndModel from metal.end_model.em_defaults import em_default_config @@ -267,7 +266,7 @@ def forward(self, x): head_outputs[t] = head(task_input) return head_outputs - def _preprocess_Y(self, Y): + def _preprocess_Y(self, Y, k=None): """Convert Y to t-length list of soft labels if necessary""" # If not a list, convert to a singleton list if not isinstance(Y, list): @@ -285,13 +284,12 @@ def _preprocess_Y(self, Y): for t, Y_t in enumerate(Y) ] - def _make_data_loader(self, X, Y, data_loader_config): + def _create_dataset(self, *data): + X, Y = data if isinstance(X, list): - dataset = MultiXYDataset(X, self._preprocess_Y(Y)) + return MultiXYDataset(X, Y) else: - dataset = MultiYDataset(X, self._preprocess_Y(Y)) - data_loader = DataLoader(dataset, shuffle=True, **data_loader_config) - return data_loader + return MultiYDataset(X, Y) def _get_loss_fn(self): """Returns the loss function to use in the train routine""" From ab9aae77bf2a2c2e686ab0e7c8926cbc972c6ebb Mon Sep 17 00:00:00 2001 From: 
ajratner Date: Thu, 11 Oct 2018 16:50:41 -0700 Subject: [PATCH 18/35] - Separating get_predictions back out for code reuse - Removing evaluate methods from MTClassifier - Adding MTClassifier._to_numpy method - Other small fixes --- metal/classifier.py | 57 ++++++++++----- metal/multitask/mt_classifier.py | 117 +++++------------------------- metal/multitask/mt_end_model.py | 14 +--- metal/multitask/mt_label_model.py | 3 + metal/multitask/utils.py | 17 ++++- 5 files changed, 72 insertions(+), 136 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index f4d7aaa2..de90d4aa 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -257,7 +257,7 @@ def _train(self, train_data, loss_fn, dev_data=None): print_confusion_matrix=True, ) - def _create_dataset(self, data): + def _create_dataset(self, *data): """Converts input data to the appropriate Dataset""" return TensorDataset(*data) @@ -346,6 +346,40 @@ def score( Returns: scores: A (float) score """ + Y_pred, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) + + # Evaluate on the specified metrics + metric_list = metric if isinstance(metric, list) else [metric] + scores = [] + for metric in metric_list: + score = metric_score(Y, Y_pred, metric, ignore_in_gold=[0]) + scores.append(score) + if verbose: + print(f"{metric.capitalize()}: {score:.3f}") + + # Optionally print confusion matrix + if print_confusion_matrix: + confusion_matrix(Y_pred, Y, pretty_print=True) + + if isinstance(scores, list) and len(scores) == 1: + return scores[0] + else: + return scores + + def _get_predictions(self, data, break_ties="random", **kwargs): + """Computes predictions in batch, given a labeled dataset + + Args: + data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): + X: The input for the predict method + Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels + in {1,...,k} + break_ties: How to break ties when making predictions + + Returns: + Y_pred: A Tensor of predictions + Y: A Tensor of labels + """ data_loader = self._create_data_loader(data) Y_pred = [] Y = [] @@ -353,7 +387,7 @@ def score( # Do batch evaluation by default, getting the predictions and labels for batch_num, data in enumerate(data_loader): Xb, Yb = data - Y.append(Yb) + Y.append(self._to_numpy(Yb)) # Optionally move to GPU if self.config["train_config"]["use_cuda"]: @@ -367,24 +401,7 @@ def score( ) Y_pred = np.hstack(Y_pred) Y = np.hstack(Y) - - # Evaluate on the specified metrics - metric_list = metric if isinstance(metric, list) else [metric] - scores = [] - for metric in metric_list: - score = metric_score(Y, Y_pred, metric, ignore_in_gold=[0]) - scores.append(score) - if verbose: - print(f"{metric.capitalize()}: {score:.3f}") - - # Optionally print confusion matrix - if print_confusion_matrix: - confusion_matrix(Y_pred, Y, pretty_print=True) - - if isinstance(scores, list) and len(scores) == 1: - return scores[0] - else: - return scores + return Y_pred, Y def predict(self, X, break_ties="random", **kwargs): """Predicts hard (int) labels for an input X on all tasks diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index dc114bb2..8c93c3e9 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -1,8 +1,8 @@ import numpy as np -from torch.utils.data import DataLoader from metal.classifier import Classifier from metal.metrics import metric_score +from metal.multitask import MultiXYDataset, MultiYDataset class MTClassifier(Classifier): @@ -40,100 +40,6 @@ def __init__(self, 
K, config): self.multitask = True self.K = K - def _batch_evaluate(self, loader, break_ties="random", **kwargs): - """Evaluates the model using minibatches - - Args: - loader: Pytorch DataLoader supplying (X,Y): - X: The input for the predict method - Y: A t-length list of [n] or [n, 1] np.ndarrays or - torch.Tensors of gold labels in {1,...,K_t} - - - Returns: - Y_p: an np.ndarray of predictions - Y: an np.ndarray of ground truth labels - """ - Y = [] - Y_p = [] - for batch, data in enumerate(loader): - X_batch, Y_batch = data - - if self.config["train_config"]["use_cuda"]: - X_batch = X_batch.cuda() - - Y_batch_list = [] - # Breaking ties for each task if soft labels provided - for Y_t in Y_batch: - Y_t = self._to_numpy(Y_t) - if Y_t.ndim > 1: - Y_t = self._break_ties(Y_t, break_ties) - Y_batch_list.append(Y_t) - - # Overwriting with tiebroken Y - Y_batch = Y_batch_list - - self._check(Y_batch, typ=list) - - Y_p_batch = self.predict(X_batch, break_ties=break_ties, **kwargs) - self._check(Y_p_batch, typ=list) - Y_p_batch = self._to_numpy(Y_p_batch) - - Y.append(Y_batch) - Y_p.append(Y_p_batch) - - Y = np.hstack(Y) - Y_p = np.hstack(Y_p) - - return Y_p, Y - - def evaluate(self, data, break_ties="random", **kwargs): - """Evaluates the model - Args: - data: either a Pytorch DataLoader or tuple supplying (X,Y): - X: The input for the predict method - Y: A t-length list of [n] or [n, 1] np.ndarrays or - torch.Tensors of gold labels in {1,...,K_t} - - Returns: - Y_p: an np.ndarray of predictions - Y: an np.ndarray of ground truth labels - """ - - if type(data) is tuple: - X, Y = data - - if self.config["train_config"]["use_cuda"]: - X = X.cuda() - - self._check(Y, typ=list) - - Y_list = [] - - # Breaking ties for each task if soft labels provided - for Y_t in Y: - Y_t = self._to_numpy(Y_t) - if Y_t.ndim > 1: - Y_t = self._break_ties(Y_t, break_ties) - Y_list.append(Y_t) - - # Overwriting with tiebroken Y - Y = Y_list - self._check(Y, typ=list) - - Y_p = self.predict(X, break_ties=break_ties, **kwargs) - self._check(Y_p, typ=list) - - elif type(data) is DataLoader: - Y_p, Y = self._batch_evaluate(data, break_ties=break_ties) - - else: - raise ValueError( - "Unrecognized input data structure, use tuple or DataLoader." - ) - - return Y_p, Y - def score( self, data, @@ -158,12 +64,10 @@ def score( scores: A (float) score or a t-length list of such scores if reduce=None """ - - # TODO: TESTS! 
- Y_p, Y = self.evaluate(data, break_ties=break_ties) + Y_pred, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) task_scores = [] - for t, Y_tp in enumerate(Y_p): + for t, Y_tp in enumerate(Y_pred): score = metric_score(Y[t], Y_tp, metric, ignore_in_gold=[0]) task_scores.append(score) @@ -261,6 +165,13 @@ def predict_task_proba(self, X, t=0, **kwargs): """ return self.predict_proba(X, **kwargs)[t] + def _create_dataset(self, *data): + X, Y = data + if isinstance(X, list): + return MultiXYDataset(X, Y) + else: + return MultiYDataset(X, Y) + @staticmethod def _to_torch(Z, dtype=None): """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor""" @@ -268,3 +179,11 @@ def _to_torch(Z, dtype=None): return [Classifier._to_torch(z, dtype=dtype) for z in Z] else: return Classifier._to_torch(Z) + + @staticmethod + def _to_numpy(Z): + """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray""" + if isinstance(Z, list): + return [Classifier._to_numpy(z) for z in Z] + else: + return Classifier._to_numpy(Z) diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index c49667cf..e8e3bee9 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -9,12 +9,7 @@ from metal.end_model.em_defaults import em_default_config from metal.end_model.loss import SoftCrossEntropyLoss from metal.modules import IdentityModule -from metal.multitask import ( - MTClassifier, - MultiXYDataset, - MultiYDataset, - TaskGraph, -) +from metal.multitask import MTClassifier, TaskGraph from metal.multitask.mt_em_defaults import mt_em_default_config from metal.utils import recursive_merge_dicts @@ -284,13 +279,6 @@ def _preprocess_Y(self, Y, k=None): for t, Y_t in enumerate(Y) ] - def _create_dataset(self, *data): - X, Y = data - if isinstance(X, list): - return MultiXYDataset(X, Y) - else: - return MultiYDataset(X, Y) - def _get_loss_fn(self): """Returns the loss function to use in the train routine""" if hasattr(self.config, "use_cuda"): diff --git a/metal/multitask/mt_label_model.py b/metal/multitask/mt_label_model.py index fb38511d..789bbf57 100644 --- a/metal/multitask/mt_label_model.py +++ b/metal/multitask/mt_label_model.py @@ -63,6 +63,9 @@ def _create_L_ind(self, L): if issparse(L[0]): L = [L_t.todense() for L_t in L] + # Make sure converted to numpy here + L = self._to_numpy(L) + L_ind = np.ones((self.n, self.m * self.k)) for yi, y in enumerate(self.task_graph.feasible_set()): for t in range(self.t): diff --git a/metal/multitask/utils.py b/metal/multitask/utils.py index 85ff3379..9bd059e1 100644 --- a/metal/multitask/utils.py +++ b/metal/multitask/utils.py @@ -1,4 +1,5 @@ import numpy as np +from scipy.sparse import issparse from torch.utils.data import Dataset @@ -36,12 +37,20 @@ class MultiXYDataset(Dataset): """ def __init__(self, X, Y): + + # Need to convert sparse matrices to dense here + # TODO: Need to handle sparse matrices better overall; maybe not use + # Datasets for them...? 
+ if issparse(X[0]): + X = [Xt.toarray() for Xt in X] + + # Check and set data objects self.X = X self.Y = Y self.t = len(Y) - n = len(X[0]) - assert np.all([len(Y_t) == n for Y_t in Y]) - assert np.all([len(X_t) == n for X_t in X]) + self.n = len(X[0]) + assert np.all([len(X_t) == self.n for X_t in X]) + assert np.all([len(Y_t) == self.n for Y_t in Y]) def __getitem__(self, index): return tuple( @@ -52,4 +61,4 @@ def __getitem__(self, index): ) def __len__(self): - return len(self.X[0]) + return self.n From 053a47340ccd64284c319446f447da068f0506c0 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 17:09:46 -0700 Subject: [PATCH 19/35] Moved "use_cuda" to global configs --- metal/classifier.py | 8 ++++---- metal/end_model/em_defaults.py | 4 ++-- metal/end_model/end_model.py | 2 +- metal/label_model/lm_defaults.py | 4 ++-- metal/multitask/mt_end_model.py | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index de90d4aa..8b063501 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -149,7 +149,7 @@ def _train(self, train_data, loss_fn, dev_data=None): ) # Moving model to GPU - if train_config["use_cuda"]: + if self.config["use_cuda"]: if self.config["verbose"]: print("Using GPU...") self.cuda() @@ -164,7 +164,7 @@ def _train(self, train_data, loss_fn, dev_data=None): ): # Moving data to GPU - if train_config["use_cuda"]: + if self.config["use_cuda"]: data = data.cuda() # Zero the parameter gradients @@ -271,7 +271,7 @@ def _create_data_loader(self, data, **kwargs): config = { **self.config["train_config"]["data_loader_config"], **kwargs, - "pin_memory": self.config["train_config"]["use_cuda"], + "pin_memory": self.config["use_cuda"], } # Return data as DataLoader @@ -390,7 +390,7 @@ def _get_predictions(self, data, break_ties="random", **kwargs): Y.append(self._to_numpy(Yb)) # Optionally move to GPU - if self.config["train_config"]["use_cuda"]: + if self.config["use_cuda"]: Xb = Xb.cuda() # Append predictions and labels from DataLoader diff --git a/metal/end_model/em_defaults.py b/metal/end_model/em_defaults.py index d4ee7f19..6c931251 100644 --- a/metal/end_model/em_defaults.py +++ b/metal/end_model/em_defaults.py @@ -14,13 +14,13 @@ "layer_out_dims": [10, 2], "batchnorm": False, "dropout": 0.0, + # GPU + "use_cuda": False, # TRAINING "train_config": { # Display "print_every": 1, # Print after this many epochs "disable_prog_bar": False, # Disable progress bar each epoch - # GPU - "use_cuda": False, # Dataloader "data_loader_config": {"batch_size": 32, "num_workers": 1}, # Train Loop diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 950693bf..946d117a 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -71,7 +71,7 @@ def _build(self, input_module, middle_modules, head_module): # Construct loss module self.criteria = SoftCrossEntropyLoss( - reduction="sum", use_cuda=self.config["train_config"]["use_cuda"] + reduction="sum", use_cuda=self.config["use_cuda"] ) def _build_input_layer(self, input_module): diff --git a/metal/label_model/lm_defaults.py b/metal/label_model/lm_defaults.py index 384598d4..649b29a8 100644 --- a/metal/label_model/lm_defaults.py +++ b/metal/label_model/lm_defaults.py @@ -3,6 +3,8 @@ "seed": None, "verbose": True, "show_plots": True, + # GPU + "use_cuda": False, # TRAIN "train_config": { # Dataloader @@ -29,7 +31,5 @@ "n_epochs": 100, "print_every": 10, "disable_prog_bar": True, # Disable progress bar each epoch - # GPU - "use_cuda": 
False, }, } diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index e8e3bee9..b633cd3a 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -87,7 +87,7 @@ def _build(self, input_modules, middle_modules, head_modules): # Construct loss module self.criteria = SoftCrossEntropyLoss( - reduction="sum", use_cuda=self.config["train_config"]["use_cuda"] + reduction="sum", use_cuda=self.config["use_cuda"] ) def _build_input_layer(self, input_modules): From 2c010dd46b4a97b813f23b4685c188b3db902b17 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 17:14:18 -0700 Subject: [PATCH 20/35] Adding cuda availability check --- metal/classifier.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/metal/classifier.py b/metal/classifier.py index 8b063501..60fe3b6b 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -41,10 +41,15 @@ def __init__(self, k, config): self.multitask = False self.k = k + # Set random seed if self.config["seed"] is None: self.config["seed"] = np.random.randint(1e6) self._set_seed(self.config["seed"]) + # Confirm that cuda is available if config is using CUDA + if self.config["use_cuda"] and not torch.cuda.is_available(): + raise ValueError("use_cuda=True but CUDA not available.") + def _set_seed(self, seed): self.seed = seed if torch.cuda.is_available(): From bf35050d22d4abeb7845a88795e4fb7d45235c3b Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 17:45:22 -0700 Subject: [PATCH 21/35] Adding new citation --- README.md | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index cae924aa..6b4739e8 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ Snorkel MeTaL uses a new matrix approximation approach to learn the accuracies o This makes it significantly more scalable than our previous approaches. 
## References -* **Best Reference: [_Training Complex Models with Multi-Task Weak Supervision_](https://ajratner.github.io/assets/papers/mts-draft.pdf) [Technical Report]** +* **Best Reference: [_Training Complex Models with Multi-Task Weak Supervision_](https://arxiv.org/abs/1810.02840) [Technical Report]** * [Snorkel MeTaL: Weak Supervision for Multi-Task Learning](https://ajratner.github.io/assets/papers/deem-metal-prototype.pdf) [SIGMOD DEEM 2018] * _[Snorkel: Rapid Training Data Creation with Weak Supervision](https://arxiv.org/abs/1711.10160) [VLDB 2018]_ * _[Data Programming: Creating Large Training Sets, Quickly](https://arxiv.org/abs/1605.07723) [NIPS 2016]_ diff --git a/setup.py b/setup.py index 1420423e..55200e28 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ def run(self): "Homepage": "https://hazyresearch.github.io/snorkel/", "Source": "https://github.com/HazyResearch/metal/", "Bug Reports": "https://github.com/HazyResearch/metal/issues", - "Citation": "https://ajratner.github.io/assets/papers/mts-draft.pdf", + "Citation": "https://arxiv.org/abs/1810.02840", }, cmdclass={"clean": CleanCommand}, ) From 126f55d1d2684749a6314f0052b2b462f0d16d5a Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 17:50:56 -0700 Subject: [PATCH 22/35] Bug fix post-merge --- metal/classifier.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/metal/classifier.py b/metal/classifier.py index 04e96bf9..cfa8244f 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -282,7 +282,8 @@ def _create_data_loader(self, data, **kwargs): else: raise ValueError("Input data type not recognized.") - def _set_optimizer(self, optimizer_config): + def _set_optimizer(self, train_config): + optimizer_config = train_config["optimizer_config"] opt = optimizer_config["optimizer"] # We set L2 here if the class does not implement its own L2 reg From e3009679dc1f96ff039774dbce15361348133de7 Mon Sep 17 00:00:00 2001 From: Jared Date: Thu, 11 Oct 2018 20:33:10 -0700 Subject: [PATCH 23/35] fixed issues with cuda allocation, training on GPU --- metal/classifier.py | 5 +- metal/utils.py | 10 +++ tutorials/Basics.ipynb | 148 +++++++++++++++++++++++++++++++------- tutorials/Multitask.ipynb | 27 ++++++- 4 files changed, 160 insertions(+), 30 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index cfa8244f..2946f157 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -10,7 +10,7 @@ from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, recursive_merge_dicts +from metal.utils import Checkpointer, mt_to_cuda, recursive_merge_dicts class Classifier(nn.Module): @@ -178,11 +178,12 @@ def _train(self, train_data, loss_fn, dev_data=None): # Moving data to GPU if self.config["use_cuda"]: - data = data.cuda() + data = mt_to_cuda(data) # Zero the parameter gradients optimizer.zero_grad() + # import pdb; pdb.set_trace() # Forward pass to calculate outputs loss = loss_fn(*data) if torch.isnan(loss): diff --git a/metal/utils.py b/metal/utils.py index d5b6b7c1..6b3491ed 100644 --- a/metal/utils.py +++ b/metal/utils.py @@ -365,3 +365,13 @@ def slice_data(data, indices): return outputs[0] else: return outputs + + +def mt_to_cuda(data): + """Utility to push data from multitask data loaders to GPU""" + data[0] = data[0].cuda() + if isinstance(data[1], list): + data[1] = [d.cuda() for d in data[1]] + else: + data[1] = data[1].cuda() + return data diff --git a/tutorials/Basics.ipynb b/tutorials/Basics.ipynb index 
3b4b9591..dedc1738 100644 --- a/tutorials/Basics.ipynb +++ b/tutorials/Basics.ipynb @@ -38,18 +38,101 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "../../metal/metal/analysis.py:13: UserWarning: \n", + "This call to matplotlib.use() has no effect because the backend has already\n", + "been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot,\n", + "or matplotlib.backends is imported for the first time.\n", + "\n", + "The backend was *originally* set to 'module://ipykernel.pylab.backend_inline' by the following code:\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n", + " \"__main__\", mod_spec)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/runpy.py\", line 85, in _run_code\n", + " exec(code, run_globals)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/__main__.py\", line 3, in \n", + " app.launch_new_instance()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n", + " app.start()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 497, in start\n", + " self.io_loop.start()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 132, in start\n", + " self.asyncio_loop.run_forever()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/base_events.py\", line 422, in run_forever\n", + " self._run_once()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/base_events.py\", line 1434, in _run_once\n", + " handle._run()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/events.py\", line 145, in _run\n", + " self._callback(*self._args)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 122, in _handle_events\n", + " handler_func(fileobj, events)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/stack_context.py\", line 300, in null_wrapper\n", + " return fn(*args, **kwargs)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 450, in _handle_events\n", + " self._handle_recv()\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 480, in _handle_recv\n", + " self._run_callback(callback, msg)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 432, in _run_callback\n", + " callback(*args, **kwargs)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/stack_context.py\", line 300, in null_wrapper\n", + " return fn(*args, **kwargs)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 283, in dispatcher\n", + " return self.dispatch_shell(stream, msg)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 233, in dispatch_shell\n", + " handler(stream, idents, msg)\n", + " File 
\"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 399, in execute_request\n", + " user_expressions, allow_stdin)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 208, in do_execute\n", + " res = shell.run_cell(code, store_history=store_history, silent=silent)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 537, in run_cell\n", + " return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2666, in run_cell\n", + " self.events.trigger('post_run_cell', result)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/events.py\", line 88, in trigger\n", + " func(*args, **kwargs)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/pylab/backend_inline.py\", line 164, in configure_once\n", + " activate_matplotlib(backend)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/pylabtools.py\", line 311, in activate_matplotlib\n", + " matplotlib.pyplot.switch_backend(backend)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/pyplot.py\", line 231, in switch_backend\n", + " matplotlib.use(newbackend, warn=False, force=True)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/__init__.py\", line 1422, in use\n", + " reload(sys.modules['matplotlib.backends'])\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/importlib/__init__.py\", line 166, in reload\n", + " _bootstrap._exec(spec, module)\n", + " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/backends/__init__.py\", line 16, in \n", + " line for line in traceback.format_stack()\n", + "\n", + "\n", + " matplotlib.use(\"TkAgg\")\n" + ] + } + ], "source": [ + "import sys\n", + "sys.path.append('../../metal')\n", "import metal" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The autoreload extension is already loaded. 
To reload it, use:\n", + " %reload_ext autoreload\n" + ] + } + ], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -99,7 +182,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -118,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -145,7 +228,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -266,7 +349,7 @@ "9 [1, 2] 0.784 0.784 0.735" ] }, - "execution_count": 6, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -286,7 +369,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -313,7 +396,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -387,7 +470,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -408,7 +491,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 15, "metadata": { "scrolled": false }, @@ -419,14 +502,14 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 8.279\n", - "[E:25]\tTrain Loss: 0.706\n", + "[E:0]\tTrain Loss: 6.028\n", + "[E:25]\tTrain Loss: 0.438\n", "[E:50]\tTrain Loss: 0.029\n", - "[E:75]\tTrain Loss: 0.008\n", - "[E:100]\tTrain Loss: 0.005\n", + "[E:75]\tTrain Loss: 0.004\n", + "[E:100]\tTrain Loss: 0.003\n", "[E:125]\tTrain Loss: 0.003\n", - "[E:150]\tTrain Loss: 0.003\n", - "[E:175]\tTrain Loss: 0.003\n", + "[E:150]\tTrain Loss: 0.002\n", + "[E:175]\tTrain Loss: 0.002\n", "[E:200]\tTrain Loss: 0.002\n", "[E:225]\tTrain Loss: 0.002\n", "[E:250]\tTrain Loss: 0.002\n", @@ -441,8 +524,8 @@ "[E:475]\tTrain Loss: 0.002\n", "[E:499]\tTrain Loss: 0.002\n", "Finished Training\n", - "CPU times: user 1.78 s, sys: 131 ms, total: 1.92 s\n", - "Wall time: 243 ms\n" + "CPU times: user 2.63 s, sys: 128 ms, total: 2.76 s\n", + "Wall time: 349 ms\n" ] } ], @@ -460,14 +543,24 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 16, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Accuracy: 0.888\n" + "ename": "TypeError", + "evalue": "'int' object is not callable", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mscore\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlabel_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mscore\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mLs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mYs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36mscore\u001b[0;34m(self, data, metric, break_ties, verbose, print_confusion_matrix, **kwargs)\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 356\u001b[0m \"\"\"\n\u001b[0;32m--> 357\u001b[0;31m 
\u001b[0mY_pred\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_predictions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbreak_ties\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbreak_ties\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 358\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 359\u001b[0m \u001b[0;31m# Evaluate on the specified metrics\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_get_predictions\u001b[0;34m(self, data, break_ties, **kwargs)\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mA\u001b[0m \u001b[0mTensor\u001b[0m \u001b[0mof\u001b[0m \u001b[0mlabels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 390\u001b[0m \"\"\"\n\u001b[0;32m--> 391\u001b[0;31m \u001b[0mdata_loader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_data_loader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 392\u001b[0m \u001b[0mY_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 393\u001b[0m \u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_create_data_loader\u001b[0;34m(self, data, **kwargs)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[0;31m# Return data as DataLoader\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 278\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 279\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 280\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_create_dataset\u001b[0;34m(self, *data)\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_create_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\"\"\"Converts input data to the 
appropriate Dataset\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 262\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mTensorDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 263\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 264\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_create_data_loader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/repos/anaconda3/envs/metal/lib/python3.6/site-packages/torch/utils/data/dataset.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, *tensors)\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/repos/anaconda3/envs/metal/lib/python3.6/site-packages/torch/utils/data/dataset.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mTypeError\u001b[0m: 'int' object is not callable" ] } ], @@ -752,8 +845,9 @@ ], "source": [ "from metal.end_model import EndModel\n", - "\n", - "end_model = EndModel([1000,10,2], 
seed=123)" + "import torch\n", + "use_cuda = torch.cuda.is_available()\n", + "end_model = EndModel([1000,10,2], seed=123, use_cuda=use_cuda)" ] }, { diff --git a/tutorials/Multitask.ipynb b/tutorials/Multitask.ipynb index a4756026..29fc1a5b 100644 --- a/tutorials/Multitask.ipynb +++ b/tutorials/Multitask.ipynb @@ -168,6 +168,31 @@ "label_model.train(Ls[0], n_epochs=200, print_every=20, seed=123)" ] }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[<100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>,\n", + " <100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>,\n", + " <100x10 sparse matrix of type ''\n", + " \twith 846 stored elements in Compressed Sparse Row format>]" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "Ls[2]" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -387,7 +412,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 21, "metadata": { "scrolled": true }, From 84459db165892bd8253b5c4545ce02e9605480c6 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 21:19:10 -0700 Subject: [PATCH 24/35] - Handle converting sparse matrices in Classifier.create_dataset - Tests for LabelModel.score --- metal/classifier.py | 13 +++++++++++-- tests/metal/label_model/test_label_model.py | 5 ++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 2946f157..ec4a7dc5 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn import torch.optim as optim +from scipy.sparse import issparse from torch.utils.data import DataLoader, Dataset, TensorDataset from tqdm import tqdm @@ -258,6 +259,8 @@ def _train(self, train_data, loss_fn, dev_data=None): def _create_dataset(self, *data): """Converts input data to the appropriate Dataset""" + # Make sure data is a tuple of dense tensors + data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data] return TensorDataset(*data) def _create_data_loader(self, data, **kwargs): @@ -463,9 +466,12 @@ def _break_ties(self, Y_s, break_ties="random"): @staticmethod def _to_numpy(Z): - """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray""" + """Converts a None, list, np.ndarray, or torch.Tensor to np.ndarray; + also handles converting sparse input to dense.""" if Z is None: return Z + elif issparse(Z): + return Z.toarray() elif isinstance(Z, np.ndarray): return Z elif isinstance(Z, list): @@ -481,9 +487,12 @@ def _to_numpy(Z): @staticmethod def _to_torch(Z, dtype=None): - """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor""" + """Converts a None, list, np.ndarray, or torch.Tensor to torch.Tensor; + also handles converting sparse input to dense.""" if Z is None: return None + elif issparse(Z): + Z = torch.from_numpy(Z.toarray()) elif isinstance(Z, torch.Tensor): pass elif isinstance(Z, list): diff --git a/tests/metal/label_model/test_label_model.py b/tests/metal/label_model/test_label_model.py index f64db9a0..0c5c6560 100644 --- a/tests/metal/label_model/test_label_model.py +++ b/tests/metal/label_model/test_label_model.py @@ -35,9 +35,8 @@ def _test_label_model(self, data, test_acc=True): # Test label prediction accuracy if test_acc: - Y_pred = label_model.predict_proba(data.L).argmax(axis=1) + 1 - acc = np.where(data.Y == Y_pred, 1, 0).sum() / 
data.n - self.assertGreater(acc, 0.95) + score = label_model.score((data.L, data.Y)) + self.assertGreater(score, 0.95) def test_no_deps(self): for seed in range(self.n_iters): From 5d1387e4affeac30f524c423625d70c1f5f3f450 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 21:24:49 -0700 Subject: [PATCH 25/35] Fixed MajorityLabelVoter, added to tests --- metal/label_model/baselines.py | 2 +- tests/metal/label_model/test_label_model.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/metal/label_model/baselines.py b/metal/label_model/baselines.py index 5a27c37f..ddb378cb 100644 --- a/metal/label_model/baselines.py +++ b/metal/label_model/baselines.py @@ -62,7 +62,7 @@ def train(self, *args, **kwargs): pass def predict_proba(self, L): - L = np.array(L.todense()).astype(int) + L = self._to_numpy(L).astype(int) n, m = L.shape Y_p = np.zeros((n, self.k)) for i in range(n): diff --git a/tests/metal/label_model/test_label_model.py b/tests/metal/label_model/test_label_model.py index 0c5c6560..fda5181b 100644 --- a/tests/metal/label_model/test_label_model.py +++ b/tests/metal/label_model/test_label_model.py @@ -3,6 +3,7 @@ import numpy as np +from metal.label_model.baselines import MajorityLabelVoter from metal.label_model.label_model import LabelModel from synthetic.generate import SingleTaskTreeDepsGenerator @@ -38,6 +39,11 @@ def _test_label_model(self, data, test_acc=True): score = label_model.score((data.L, data.Y)) self.assertGreater(score, 0.95) + # Test against baseline + mv = MajorityLabelVoter() + mv_score = mv.score((data.L, data.Y)) + self.assertGreater(score, mv_score) + def test_no_deps(self): for seed in range(self.n_iters): np.random.seed(seed) From e2beb9265cab1894d522bdd5d61ec7c09660e432 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 21:35:41 -0700 Subject: [PATCH 26/35] Cleaning up Basics tutorial --- metal/classifier.py | 5 +- tutorials/Basics.ipynb | 232 ++++++++++++++--------------------------- 2 files changed, 82 insertions(+), 155 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index ec4a7dc5..a46c58b7 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -215,7 +215,10 @@ def _train(self, train_data, loss_fn, dev_data=None): if evaluate_dev and (epoch % train_config["validation_freq"] == 0): val_metric = train_config["validation_metric"] dev_score = self.score( - dev_loader, metric=val_metric, verbose=False + dev_loader, + metric=val_metric, + verbose=False, + print_confusion_matrix=False, ) if train_config["checkpoint"]: diff --git a/tutorials/Basics.ipynb b/tutorials/Basics.ipynb index dedc1738..962bbdd9 100644 --- a/tutorials/Basics.ipynb +++ b/tutorials/Basics.ipynb @@ -38,81 +38,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "../../metal/metal/analysis.py:13: UserWarning: \n", - "This call to matplotlib.use() has no effect because the backend has already\n", - "been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot,\n", - "or matplotlib.backends is imported for the first time.\n", - "\n", - "The backend was *originally* set to 'module://ipykernel.pylab.backend_inline' by the following code:\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n", - " \"__main__\", mod_spec)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/runpy.py\", line 85, in _run_code\n", 
- " exec(code, run_globals)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/__main__.py\", line 3, in \n", - " app.launch_new_instance()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n", - " app.start()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 497, in start\n", - " self.io_loop.start()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 132, in start\n", - " self.asyncio_loop.run_forever()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/base_events.py\", line 422, in run_forever\n", - " self._run_once()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/base_events.py\", line 1434, in _run_once\n", - " handle._run()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/asyncio/events.py\", line 145, in _run\n", - " self._callback(*self._args)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 122, in _handle_events\n", - " handler_func(fileobj, events)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/stack_context.py\", line 300, in null_wrapper\n", - " return fn(*args, **kwargs)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 450, in _handle_events\n", - " self._handle_recv()\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 480, in _handle_recv\n", - " self._run_callback(callback, msg)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py\", line 432, in _run_callback\n", - " callback(*args, **kwargs)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/tornado/stack_context.py\", line 300, in null_wrapper\n", - " return fn(*args, **kwargs)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 283, in dispatcher\n", - " return self.dispatch_shell(stream, msg)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 233, in dispatch_shell\n", - " handler(stream, idents, msg)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 399, in execute_request\n", - " user_expressions, allow_stdin)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 208, in do_execute\n", - " res = shell.run_cell(code, store_history=store_history, silent=silent)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 537, in run_cell\n", - " return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2666, in run_cell\n", - " self.events.trigger('post_run_cell', result)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/events.py\", line 88, in trigger\n", - " func(*args, **kwargs)\n", - " File 
\"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/ipykernel/pylab/backend_inline.py\", line 164, in configure_once\n", - " activate_matplotlib(backend)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/IPython/core/pylabtools.py\", line 311, in activate_matplotlib\n", - " matplotlib.pyplot.switch_backend(backend)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/pyplot.py\", line 231, in switch_backend\n", - " matplotlib.use(newbackend, warn=False, force=True)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/__init__.py\", line 1422, in use\n", - " reload(sys.modules['matplotlib.backends'])\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/importlib/__init__.py\", line 166, in reload\n", - " _bootstrap._exec(spec, module)\n", - " File \"/home/jdunnmon/repos/anaconda3/envs/metal/lib/python3.6/site-packages/matplotlib/backends/__init__.py\", line 16, in \n", - " line for line in traceback.format_stack()\n", - "\n", - "\n", - " matplotlib.use(\"TkAgg\")\n" - ] - } - ], + "outputs": [], "source": [ "import sys\n", "sys.path.append('../../metal')\n", @@ -121,7 +49,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -182,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -201,7 +129,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -228,7 +156,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -349,7 +277,7 @@ "9 [1, 2] 0.784 0.784 0.735" ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -369,7 +297,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -379,9 +307,7 @@ "
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -396,7 +322,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -406,9 +332,7 @@ "
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -470,7 +394,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -491,7 +415,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 9, "metadata": { "scrolled": false }, @@ -524,8 +448,8 @@ "[E:475]\tTrain Loss: 0.002\n", "[E:499]\tTrain Loss: 0.002\n", "Finished Training\n", - "CPU times: user 2.63 s, sys: 128 ms, total: 2.76 s\n", - "Wall time: 349 ms\n" + "CPU times: user 817 ms, sys: 26.1 ms, total: 843 ms\n", + "Wall time: 284 ms\n" ] } ], @@ -543,24 +467,17 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 10, "metadata": {}, "outputs": [ { - "ename": "TypeError", - "evalue": "'int' object is not callable", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mscore\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlabel_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mscore\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mLs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mYs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36mscore\u001b[0;34m(self, data, metric, break_ties, verbose, print_confusion_matrix, **kwargs)\u001b[0m\n\u001b[1;32m 355\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 356\u001b[0m \"\"\"\n\u001b[0;32m--> 357\u001b[0;31m \u001b[0mY_pred\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_predictions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbreak_ties\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbreak_ties\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 358\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 359\u001b[0m \u001b[0;31m# Evaluate on the specified metrics\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_get_predictions\u001b[0;34m(self, data, break_ties, **kwargs)\u001b[0m\n\u001b[1;32m 389\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mA\u001b[0m \u001b[0mTensor\u001b[0m \u001b[0mof\u001b[0m \u001b[0mlabels\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 390\u001b[0m \"\"\"\n\u001b[0;32m--> 391\u001b[0;31m \u001b[0mdata_loader\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_data_loader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 392\u001b[0m \u001b[0mY_pred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 393\u001b[0m \u001b[0mY\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_create_data_loader\u001b[0;34m(self, data, **kwargs)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[0;31m# Return data as DataLoader\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 278\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 279\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 280\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/repos/metal/metal/classifier.py\u001b[0m in \u001b[0;36m_create_dataset\u001b[0;34m(self, *data)\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_create_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\"\"\"Converts input data to the appropriate Dataset\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 262\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mTensorDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 263\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 264\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_create_data_loader\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/repos/anaconda3/envs/metal/lib/python3.6/site-packages/torch/utils/data/dataset.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, *tensors)\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m 
\u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/repos/anaconda3/envs/metal/lib/python3.6/site-packages/torch/utils/data/dataset.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0mall\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mTypeError\u001b[0m: 'int' object is not callable" + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.879\n", + " y=1 y=2 \n", + " l=1 181 56 \n", + " l=2 65 698 \n" ] } ], @@ -577,16 +494,19 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Precision: 0.838\n", - "Recall: 0.675\n", - "F1: 0.748\n" + "Precision: 0.764\n", + "Recall: 0.736\n", + "F1: 0.749\n", + " y=1 y=2 \n", + " l=1 181 56 \n", + " l=2 65 698 \n" ] } ], @@ -603,17 +523,19 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.836\n", "Precision: 0.623\n", "Recall: 0.841\n", - "F1: 0.716\n" + "F1: 0.716\n", + " y=1 y=2 \n", + " l=1 207 125 \n", + " l=2 39 629 \n" ] } ], @@ -650,22 +572,22 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "array([[0.2409358 , 0.7590642 ],\n", - " [0.0087304 , 0.9912696 ],\n", - " [0.01293511, 0.98706489],\n", + "array([[0.33879491, 0.66120509],\n", + " [0.01750567, 0.98249433],\n", + " [0.02757502, 0.97242498],\n", " ...,\n", - " [0.5918672 , 0.4081328 ],\n", - " [0.98033657, 0.01966343],\n", - " [0.2443802 , 0.7556198 ]])" + " [0.74142168, 0.25857832],\n", + " [0.98866598, 0.01133402],\n", + " [0.38616893, 0.61383107]])" ] }, - "execution_count": 22, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -699,21 +621,19 @@ }, { "cell_type": "code", - "execution_count": 23, + 
"execution_count": 14, "metadata": { "scrolled": true }, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAH5dJREFUeJzt3XuYVnW99/H3Rw7iGUE0BAU0RDwijQaJh8QyNEV7Yqtlso2itpqZqaHPsx9xbzV7NDU74GaHCZmaGT7gMUz0Ms8OB01FZDwyojIiIASo2Hf/cf9Gb4bFzD3MrLlvnM/ruu7rXuu3fmut73CN8/G3jooIzMzMGtqs3AWYmVllckCYmVkmB4SZmWVyQJiZWSYHhJmZZXJAmJlZJgeEbdIkPSjpO229blr/EEnzN3b9jO3dI2l0mv5XSQ+34ra/KWlGa23P2gcHhFUESa9KOrLcddSTNF7Sh5JWpM+Lkn4lqWd9n4j4W0QMKHFbNzbVLyJGRMTkVqi9r6SQ1LFo23+IiC+3dNvWvjggzDbsjxGxDdANOAH4DDCrOCRagwr836JVHP9SWkWTtL2kOyXVSVqapns36La7pCclLZc0TVK3ovWHSHpU0jJJT0s6vLk1RMSHEfEccCJQB/w4bftwSbVF+/qJpDfSiGO+pOGSvgJcCJwoaaWkp1PfByVdKukRYBWwW8YhL0n6Zfq5XpA0vGjBOiOuBqOUh9L3srTPoQ0PWUn6gqSn0rafkvSFomUPSvpPSY+kn2WGpB2a++9mmz4HhFW6zYDfAX2AXYHVwK8a9DkV+DawM7AWuBZAUi/gLuASCqOAc4E/S+qxMYVExEfANOCQhsskDQDOBA5Mo46jgFcj4l7gMgqjka0jYv+i1b4FjAW2AV7L2OXngZeBHYCLgKnF4deIQ9N317TPxxrU2o3Cv8u1QHfgKuAuSd2Lun0DOA3YEehM4d/O2hkHhFW0iFgSEX+OiFURsQK4FDisQbffR8SzEfEP4N+Bf5HUATgFuDsi7o6If0bEfUA1cHQLSlpEIWwa+gjYHNhLUqeIeDUiXmpiWzdExHMRsTYiPsxYvhi4Jo1g/gjMB45pQe31jgEWRMTv075vBl4Aji3q87uIeDEiVgO3AoNaYb+2iXFAWEWTtKWk/5L0mqT3KBw+6ZoCoN7CounXgE4U/q+7DzAqHV5aJmkZMAxoyTmEXsC7DRsjogY4GxgPLJZ0i6Sdm9jWwiaWvxHrPk3zNQqjpJbamfVHLK9R+NnqvVU0vQrYuhX2a5sYB4RVuh8DA4DPR8S2fHL4REV9dima3hX4EHiHwh/g30dE16LPVhFx+cYUkk4kHwv8LWt5RNwUEcMoBFMAP6tftIFNNvUo5V6Sin/OXSmMYAD+AWxZtOwzzdjuolRjsV2BN5pYz9oZB4RVkk6SuhR9OlI4Pr+awgnXbhSOxTd0iqS9JG0J/AdwWzpfcCNwrKSjJHVI2zw84yR3oyR1kjQQuJnCH+KrMvoMkHSEpM2BNanmj9Lit4G+G3Gl0o7AWWn/o4CBwN1p2VzgpLSsCvh60Xp1wD+B3Taw3buBPSR9Q1JHSScCewF3NrM++5RzQFgluZvCH9b6z3jgGmALCiOCx4F7M9b7PXADhcMiXYCzACJiITCSwlVEdRRGFOdR+u/9iZJWAsuA6cAS4HMRsSij7+bA5anOtyj8cb8wLftT+l4iaXaJ+wZ4Auiftnkp8PWIWJKW/TuwO7AUuBi4qX6liFiV+j+SDq0NKd5o2sZXKYzOlgDnA1+NiHeaUZu1A/ILg8zMLItHEGZmlskBYWZmmRwQZmaWyQFhZmaZOjbdpXLtsMMO0bdv33KXYWa2SZk1a9Y7EdHkI2c26YDo27cv1dXV5S7DzGyTIinr2V/r8SEmMzPL5IAwM7NMDggzM8u0SZ+DMLNPpw8//JDa2lrWrFlT7lI2aV26dKF379506tRpo9Z3QJhZxamtrWWbbbahb9++rPtAWytVRLBkyRJqa2vp16/fRm3Dh5jMrOKsWbOG7t27OxxaQBLdu3dv0Sgs14CQ9CNJz0l6VtLN6XHL/SQ9IWmBpD9K6pz6bp7ma9LyvnnWZmaVzeHQci39N8wtINL7gM8CqiJiH6ADcBKFl6hcHRH9KTyqeExaZQywNCI+C1zNJy9bMTOzMsj7HERHYAtJH1J4+9WbwBEUXogOMJnCM/8nUHhu//jUfhvwK0kKP4/crN3rO+6uVt3eq5c3/mrvDh06sO+++7J27VoGDhzI5MmT2XLLLRtdZ0MefPBBrrzySu68806mT5/O888/z7hx4zL7Llu2jJtuuonTTz+9WfsYP348W2+9Neeee+5G1bghuQVERLwh6UrgdQovf5kBzAKWRcTa1K2WT96D24v0jt6IWCtpOdCdwstSPiZpLDAWYNddd82rfLOya+0/ipuS/z6uJx/WLivb/rfYYgvmzp0LwDe/+U2uu+46zjnnnI+XRwQRwWabNe8gzHHHHcdxxx23weXLli3jN7/5TbMDIi95HmLansKooB+Fl6RvBYzI6Fo/Qsg6WLbe6CEiJkZEVURU9ejR5KNEzMxa5JBDDqGmpoZXX32VgQMHcvrppzN48GAWLlzIjBkzGDp0KIMHD2bUqFGsXLkSgHvvvZc999yTYcOGMXXq1I+3dcMNN3DmmWcC8Pbbb3PCCSew//77s//++/Poo48ybtw4XnrpJQYNGsR5550HwBVXXMGBBx7Ifvvtx0UXffLG3UsvvZQBAwZw5JFHMn/+/Fx+9jxPUh8JvBIRdRHxITAV+ALQNb1rGKA3n7yEvZb08vm0fDvg3RzrMzNr1Nq1a7nnnnvYd999AZg/fz6nnnoqc+bMYauttuKSSy7hr3/9K7Nnz6aqqoqrrrqKNWvW8N3vfpc77riDv/3tb7z11luZ2z7rrLM47LDDePrpp5k9ezZ77703l19+Obvvvjtz587liiuuYMaMGSxYsIAnn3ySuXPnMmvWLB566CFmzZrFLbfcwpw5c5g6dSpPPfVULj9/nucgXgeGpBfJrwaGA9XAAxResH4LMBqYlvpPT/OPpeUzff7BzMph9erVDBo0CCiMIMaMGcOiRYvo06cPQ4YUXvH9+OOP8/zzz3PwwQcD8MEHHzB06FBeeOEF+vXrR//+/QE45ZRTmDhx4nr7mDlzJlOmTAEK5zy22247li5duk6fGTNmMGPGDA444AAAVq5cyYIFC1ixYgUnnHDCx+dFGjts1RJ5noN4QtJtwGxgLTAHmAjcBdwi6ZLUNimtMgn4vaQaCiOHk/KqzcysMcXnIIpttdVWH09HBF/60pe4+eab1+kzd+7cVrtENyK44IIL+N73vrdO+zXXXNMmlwHneh9ERFwUEXtGxD4R8a2IeD8iXo6IgyLisxExKiLeT33XpPnPpuUv51mbmVlLDBkyhEceeYSam
hoAVq1axYsvvsiee+7JK6+8wksvvQSwXoDUGz58OBMmTADgo48+4r333mObbbZhxYoVH/c56qijuP766z8+t/HGG2+wePFiDj30UG6//XZWr17NihUruOOOO3L5Gf2oDTOreNPPPJj9enctdxnr6NGjBzfccAMnn3wy77//PgCXXHIJe+yxBxMnTuSYY45hhx12YNiwYTz77LPrrf+LX/yCsWPHMmnSJDp06MCECRMYOnQoBx98MPvssw8jRozgiiuuYN68eQwdOhSArbfemhtvvJHBgwdz4oknMmjQIPr06cMhhxySy8+oTfkwf1VVVfiFQfZp1d4vc91p193Waau0gNhUzJs3j4EDB67TJmlWRFQ1ta6fxWRmZpkcEGZmlskBYWZmmRwQZmaWyVcxmdmmYdGccldQWXY+IPddeARhZmaZPIIws4q332/7tO4Gxz7YZJe365bwo/E/5/HZf2f77bahc6dOnH/6aE4YcURm/wcfrebK66Zw55Rr11vW9/PHUH3PjezQbfsWFt62HBBmZg1EBMd/+xxGjzqWm359GQCv1S5i+oyHylxZ23JAmJk1MPPhJ+ncuRPfP/XrH7f16b0zP/j2SaxZ8z7/dsFlVD8zj44dOnDVRefwxYMPXGf9Je8u4+QzLqRuyVIOGrQ3m+oNyT4HYWbWwHMvvszgffbMXPbrG24F4O/338rNv7mM0WdfxJo176/T5+KrJzLsoEHMmXEzx335MF5/I/uR35XOIwgzsyacceFPefjJuXTu3InePXfiB6edCMCen+1Hn96f4cWXX1un/0OPz2bqb68E4JgjD2H7rtu2ec2twSMIM7MG9t5jN2Y/+8LH87++7ALuv/U66pYsLflwUVs8jjtvDggzswaOGHYQa97/gAmT//Rx26rVawA49POD+cPt9wDw4kuv8fobbzFg977rrH/okMH8YWqhzz0zH2HpsvfapvBW5kNMZlbxnvnOa+y32Stttj9J/P9JP+dH43/O/5swmR7dt2erLbbgZxeexcijDuf74y5j3+H/QscOHbjh6ovZfPPO66x/0Y/GcvIZFzL4qG9w2JDB7NrrM21We2vy477NKpQf993gcd9tGBCbhBLvpK7Ix31LGiBpbtHnPUlnS+om6T5JC9L39qm/JF0rqUbSM5IG51WbmZk1LbeAiIj5ETEoIgYBnwNWAbcD44D7I6I/cH+aBxgB9E+fscCEvGozM7OmtdVJ6uHASxHxGjASmJzaJwPHp+mRwJQoeBzoKqlnG9VnZhUkiE325rJK0tJ/w7YKiJOA+jd37xQRbwKk7x1Tey9gYdE6taltHZLGSqqWVF1XV5djyWZWLq8t+5C1q95zSLRARLBkyRK6dOmy0dvI/SomSZ2B44ALmuqa0bbeb0dETAQmQuEkdYsLNLOK88snlvIDoE/Xd1D60zBP/h/CdSyf12SXLl260Lt3743eRVtc5joCmB0Rb6f5tyX1jIg30yGkxam9FtilaL3ewKI2qM/MKsx77/+TSx9ask7bq12+UaZqKtT45bnvoi0OMZ3MJ4eXAKYDo9P0aGBaUfup6WqmIcDy+kNRZmbW9nIdQUjaEvgS8L2i5suBWyWNAV4HRqX2u4GjgRoKVzydlmdtZmbWuFwDIiJWAd0btC2hcFVTw74BnJFnPWZmVjo/i8nMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwy5RoQkrpKuk3SC5LmSRoqqZuk+yQtSN/bp76SdK2kGknPSBqcZ21mZta4vEcQvwDujYg9gf2BecA44P6I6A/cn+YBRgD902csMCHn2szMrBG5BYSkbYFDgUkAEfFBRCwDRgKTU7fJwPFpeiQwJQoeB7pK6plXfWZm1rg8RxC7AXXA7yTNkfRbSVsBO0XEmwDpe8fUvxewsGj92tS2DkljJVVLqq6rq8uxfDOz9i3PgOgIDAYmRMQBwD/45HBSFmW0xXoNERMjoioiqnr06NE6lZqZ2XryDIhaoDYinkjzt1EIjLfrDx2l78VF/XcpWr83sCjH+szMrBG5BUREvAUslDQgNQ0HngemA6NT22hgWpqeDpyarmYaAiyvPxRlZmZtr2PO2/8B8AdJnYGXgdMohNKtksYArwOjUt+7gaOBGmBV6mtmZmWSa0BExFygKmPR8Iy+AZyRZz1mZlY630ltZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpapyYCQtJWkzdL0HpKOk9Qp/9LMzKycShlBPAR0kdSLwgt+TgNuyLMoMzMrv1ICQhGxCvga8MuIOAHYK9+yzMys3EoKCElDgW8Cd6W2vB/yZ2ZmZVZKQPwQuAC4PSKek7Qb8EC+ZZmZWbk1ORKIiIconIeon38ZOCvPoszMrPyaDAhJewDnAn2L+0fEEfmVZWZm5VbKuYQ/AdcBvwU+yrccMzOrFKUExNqImJB7JWZmVlFKOUl9h6TTJfWU1K3+U8rGJb0q6e+S5kqqTm3dJN0naUH63j61S9K1kmokPSNpcAt+LjMza6FSAmI0cB7wKDArfaqbsY8vRsSgiKh/9eg44P6I6E/hxrtxqX0E0D99xgIetZiZlVEpVzH1a+V9jgQOT9OTgQeBn6T2Kend1I9L6iqpZ0S82cr7NzOzEpTyLKZOks6SdFv6nNmMZzEFMEPSLEljU9tO9X/00/eOqb0XsLBo3drU1rCesZKqJVXX1dWVWIaZmTVXKSepJwCdgN+k+W+ltu+UsO7BEbFI0o7AfZJeaKSvMtpivYaIicBEgKqqqvWWm5lZ6yglIA6MiP2L5mdKerqUjUfEovS9WNLtwEHA2/WHjiT1BBan7rXALkWr9wYWlbIfMzNrfaWcpP5I0u71M+lRG03eD5EeE75N/TTwZeBZYDqFE9+k72lpejpwarqaaQiw3OcfzMzKp5QRxHnAA5JepnAYqA+FR343ZSfgdkn1+7kpIu6V9BRwq6QxwOvAqNT/buBooAZYVeI+zMwsJ6VcxXS/pP7AAAoB8UJEvF/Cei8D+2e0LwGGZ7QHcEYpRZuZWf42GBCSjoiImZK+1mDR7pKIiKk512ZmZmXU2AjiMGAmcGzGsgAcEGZmn2IbDIiIuChN/kdEvFK8TFJr3zxnZmYVppSrmP6c0XZbaxdiZmaVpbFzEHsCewPbNTgPsS3QJe/CzMysvBo7BzEA+CrQlXXPQ6wAvptnUWZmVn6NnYOYBkyTNDQiHmvDmszMrAKUcg7i+5K61s9I2l7S9TnWZGZmFaCUgNgvIpbVz0TEUuCA/EoyM7NKUEpAbFb/1jcovBGO0h7RYWZmm7BS/tD/HHhUUv2lraOAS/MryczMKkEpz2Kakt4nfQSFZzF9LSKez70yMzMrq8bug9g2It5Lh5TeAm4qWtYtIt5tiwLNzKw8GhtB3EThPohZrPtmN6X53XKsy8zMyqyx+yC+mr793CUzs3aosUNM
gxtbMSJmt345ZmZWKRo7xPTz9N0FqAKepnB4aT/gCWBYKTuQ1AGoBt6IiK+mJ8HeAnQDZgPfiogPJG0OTAE+BywBToyIV5v9E5mZWavY4H0QEfHFiPgi8BowOCKqIuJzFG6Sq2nGPn4IzCua/xlwdUT0B5YCY1L7GGBpRHwWuDr1MzOzMinlRrk9I+Lv9TMR8SwwqJSNS+oNHAP8Ns2LwuWy9fdUTAaOT9Mj0zxp+fDU38zMyqCUG+XmSfotcCOFq5dOYd0RQWOuAc4Htknz3YFlEbE2zdcCvdJ0L2AhQESslbQ89X+nxH2ZmVkrKmUEcRrwHIVDRWcDz6e2Rkn6KrA4ImYVN2d0jRKWFW93rKRqSdV1dXVNlWFmZhuplDup10i6Drg7IuY3Y9sHA8dJOprCie5tKYwoukrqmEYRvYFFqX8tsAtQK6kjsB2w3s14ETERmAhQVVW1XoCYmVnraHIEIek4YC5wb5ofJGl6U+tFxAUR0Tsi+gInATMj4pvAA8DXU7fRwLQ0PT3Nk5bPjAgHgJlZmZRyiOki4CBgGUBEzAX6tmCfPwHOkVRD4RzDpNQ+Ceie2s8BxrVgH2Zm1kKlnKReGxHLW3JBUUQ8CDyYpl+mEDgN+6yh8KRYMzOrAKUExLOSvgF0kNQfOAt4NN+yzMys3Eo5xPQDYG/gfQoP8FtO4WomMzP7FGt0BJEek3FxRJwH/O+2KcnMzCpBoyOIiPiIwrORzMysnSnlHMScdFnrn4B/1DdGxNTcqjIzs7IrJSC6UXi66hFFbQE4IMzMPsVKCYjzIsLPQzIza2c2eA5C0rGS6oBnJNVK+kIb1mVmZmXW2EnqS4FDImJn4H8BP22bkszMrBI0FhBrI+IFgIh4gk8e2W1mZu1AY+cgdpR0zobmI+Kq/MoyM7Nyaywg/pt1Rw0N583M7FNsgwERERe3ZSFmZlZZSnkWk5mZtUMOCDMzy+SAMDOzTKW8cvT/FE1vnm85ZmZWKRq7k/p8SUP55P3RAI+VumFJXSQ9KelpSc9Juji195P0hKQFkv4oqXNq3zzN16TlfTfuRzIzs9bQ2AhiPoVXgO4m6W+SJlJ4Z/SAErf9PnBEROwPDAK+ImkI8DPg6ojoDywFxqT+Y4ClEfFZ4OrUz8zMyqSxgFgKXAjUAIcD16b2cZKafOVoFKxMs53SJyg8Ffa21D4ZOD5Nj0zzpOXD1ZIXYZuZWYs0FhBfAe4CdgeuAg4C/hERp0VESQ/uk9RB0lxgMXAf8BKwLCLWpi61QK803QtYCJCWLwe6Z2xzrKRqSdV1dXWllGFmZhthgwERERdGxHDgVeBGCjfV9ZD0sKQ7Stl4RHwUEYOA3hQCZmBWt/SdNVqI9RoiJkZEVURU9ejRo5QyzMxsI5TyPoi/RMRTwFOS/i0ihknaoTk7iYhlkh4EhgBdJXVMo4TewKLUrRbYBaiV1BHYDni3OfsxM7PW0+RlrhFxftHsv6a2Jl8gJKmHpK5pegvgSGAe8ACfXBk1GpiWpqenedLymRGx3gjCzMzaRikjiI9FxNPN6N4TmCypA4UgujUi7pT0PHCLpEuAOcCk1H8S8HtJNRRGDic1pzYzM2tdzQqI5oiIZ4ADMtpfpnA+omH7GgqX1ZqZWQXwozbMzCyTA8LMzDI5IMzMLJMDwszMMjkgzMwskwPCzMwyOSDMzCyTA8LMzDLldqOcbWLGb1fuCirL+OXlrsCs7DyCMDOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPLlFtASNpF0gOS5kl6TtIPU3s3SfdJWpC+t0/tknStpBpJz0ganFdtZmbWtDxHEGuBH0fEQGAIcIakvYBxwP0R0R+4P80DjAD6p89YYEKOtZmZWRNyC4iIeDMiZqfpFcA8oBcwEpicuk0Gjk/TI4EpUfA40FVSz7zqMzOzxrXJOQhJfYEDgCeAnSLiTSiECLBj6tYLWFi0Wm1qa7itsZKqJVXX1dXlWbaZWbuW+8P6JG0N/Bk4OyLek7TBrhltsV5DxERgIkBVVdV6y0vVd9xdG7vqp9KrXcpdgZlVmlxHEJI6UQiHP0TE1NT8dv2ho/S9OLXXArsUrd4bWJRnfWZmtmF5XsUkYBIwLyKuKlo0HRidpkcD04raT01XMw0BltcfijIzs7aX5yGmg4FvAX+XNDe1XQhcDtwqaQzwOjAqLbsbOBqoAVYBp+VYm5mZNSG3gIiIh8k+rwAwPKN/AGfkVY+ZmTWP76Q2M7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDggzM8vkgDAzs0wOCDMzy+SAMDOzTHm+k/p6SYslPVvU1k3SfZIWpO/tU7skXSupRtIzkgbnVZeZmZUmzxHEDcBXGrSNA+6PiP7A/WkeYATQP33GAhNyrMvMzEqQW0BExEPAuw2aRwKT0/Rk4Pii9ilR8DjQVVLPvGozM7OmtfU5iJ0i4k2A9L1jau8FLCzqV5va1iNprKRqSdV1dXW5Fmtm1p5VyklqZbRFVseImBgRVRFR1aNHj5zLMjNrv9o6IN6uP3SUvhen9lpgl6J+vYFFbVybmZkVaeuAmA6MTtOjgWlF7aemq5mGAMvrD0WZmVl5dMxrw5JuBg4HdpBUC1wEXA7cKmkM8DowKnW/GzgaqAFWAaflVZeZmZUmt4CIiJM3sGh4Rt8AzsirFjMza75KOUltZmYVxgFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlskBYWZmmRwQZmaWyQFhZmaZHBBmZpbJAWFmZpkcEGZmlqmiAkLSVyTNl1QjaVy56zEza88qJiAkdQB+DYwA9gJOlrRXeasyM2u/KiYggIOAmoh4OSI+AG4BRpa5JjOzdqtjuQso0gtYWDRfC3y+YSdJY4GxaXalpPltUNunnmAH4J1y11ExLla5K7AG/DvaQMt+R/uU0qmSAiLrp431GiImAhPzL6d9kVQdEVXlrsNsQ/w72vYq6RBTLbBL0XxvYFGZajEza/cqKSCeAvpL6iepM3ASML3MNZmZtVsVc4gpItZKOhP4C9ABuD4initzWe2JD9tZpfPvaBtTxHqH+c3MzCrqEJOZmVUQB4SZmWVyQLRzkq6XtFjSs+WuxSyLpF0kPSBpnqTnJP2w3DW1Fz4H0c5JOhRYCUyJiH3KXY9ZQ5J6Aj0jYrakbYBZwPER8XyZS/vU8wiinYuIh4B3y12H2YZExJsRMTtNrwDmUXjyguXMAWFmmwxJfYEDgCfKW0n74IAws02CpK2BPwNnR8R75a6nPXBAmFnFk9SJQjj8ISKmlrue9sIBYWYVTZKAScC8iLiq3PW0Jw6Idk7SzcBjwABJtZLGlLsmswYOBr4FHCFpbvocXe6i2gNf5mpmZpk8gjAzs0wOCDMzy+SAMDOzTA4IMzPL5IAwM7NMDgizEkha2Yy+4yWdm9f2zdqKA8LMzDI
[... remainder of base64 "image/png" payloads (previous and regenerated matplotlib figures) elided ...]\n", "text/plain": [ "
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -735,19 +655,17 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 15, "metadata": {}, "outputs": [ { "data": { - "image/png": "[... base64 PNG payload for the previous figure elided ...]\n", + "image/png": "[... base64 PNG payload for the regenerated figure elided ...]\n", "text/plain": [ "
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -769,7 +687,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -777,8 +695,8 @@ "output_type": "stream", "text": [ " y=1 y=2 \n", - " l=1 166 80 \n", - " l=2 32 722 \n" + " l=1 181 65 \n", + " l=2 56 698 \n" ] } ], @@ -820,7 +738,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 17, "metadata": { "scrolled": false }, @@ -866,7 +784,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 18, "metadata": { "scrolled": true }, @@ -875,74 +793,74 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 32/32 [00:00<00:00, 204.03it/s]\n" + "100%|██████████| 32/32 [00:00<00:00, 95.07it/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 0 with best score 0.981\n", - "[E:0]\tTrain Loss: 0.455\tDev score: 0.981\n" + "Saving model at iteration 0 with best score 0.992\n", + "[E:0]\tTrain Loss: 0.508\tDev score: 0.992\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 32/32 [00:00<00:00, 275.81it/s]\n" + "100%|██████████| 32/32 [00:00<00:00, 99.93it/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:1]\tTrain Loss: 0.412\tDev score: 0.921\n" + "[E:1]\tTrain Loss: 0.470\tDev score: 0.928\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 32/32 [00:00<00:00, 243.93it/s]\n" + "100%|██████████| 32/32 [00:00<00:00, 99.20it/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:2]\tTrain Loss: 0.406\tDev score: 0.942\n" + "[E:2]\tTrain Loss: 0.465\tDev score: 0.949\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 32/32 [00:00<00:00, 241.64it/s]\n" + "100%|██████████| 32/32 [00:00<00:00, 97.21it/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:3]\tTrain Loss: 0.404\tDev score: 0.920\n" + "[E:3]\tTrain Loss: 0.461\tDev score: 0.969\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 32/32 [00:00<00:00, 218.43it/s]\n" + "100%|██████████| 32/32 [00:00<00:00, 96.68it/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:4]\tTrain Loss: 0.402\tDev score: 0.927\n", - "Restoring best model from iteration 0 with score 0.981\n", + "[E:4]\tTrain Loss: 0.460\tDev score: 0.954\n", + "Restoring best model from iteration 0 with score 0.992\n", "Finished Training\n", - "Confusion Matrix (Dev)\n", + "Accuracy: 0.996\n", " y=1 y=2 \n", " l=1 244 2 \n", " l=2 2 752 \n" @@ -970,7 +888,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 19, "metadata": { "scrolled": false }, @@ -980,14 +898,20 @@ "output_type": "stream", "text": [ "Label Model:\n", - "Precision: 0.757\n", - "Recall: 0.695\n", - "F1: 0.725\n", + "Precision: 0.747\n", + "Recall: 0.707\n", + "F1: 0.727\n", + " y=1 y=2 \n", + " l=1 174 59 \n", + " l=2 72 695 \n", "\n", "End Model:\n", "Precision: 0.996\n", "Recall: 0.984\n", - "F1: 0.990\n" + "F1: 0.990\n", + " y=1 y=2 \n", + " l=1 242 1 \n", + " l=2 4 753 \n" ] } ], From aa281c645675a06ca6477722c26ee937dec80278 Mon Sep 17 00:00:00 2001 From: ajratner Date: Thu, 11 Oct 2018 21:44:05 -0700 Subject: [PATCH 27/35] Fixing bug in MTClassifier.score --- metal/multitask/mt_classifier.py | 7 ++ tutorials/Multitask.ipynb | 113 +++++++++++++++++-------------- 2 files changed, 68 insertions(+), 52 
deletions(-) diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index 8c93c3e9..11cedb68 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -47,6 +47,7 @@ def score( reduce="mean", break_ties="random", verbose=True, + print_confusion_matrix=False, **kwargs, ): """Scores the predictive performance of the Classifier on all tasks @@ -66,6 +67,12 @@ def score( """ Y_pred, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) + # TODO: Handle multiple metrics... + metric_list = metric if isinstance(metric, list) else [metric] + if len(metric_list) > 1: + raise NotImplementedError("Multiple metrics for multi-task.") + metric = metric_list[0] + task_scores = [] for t, Y_tp in enumerate(Y_pred): score = metric_score(Y[t], Y_tp, metric, ignore_in_gold=[0]) diff --git a/tutorials/Multitask.ipynb b/tutorials/Multitask.ipynb index 29fc1a5b..7d69d37d 100644 --- a/tutorials/Multitask.ipynb +++ b/tutorials/Multitask.ipynb @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -99,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +123,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -140,7 +140,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -149,14 +149,14 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 4.396\n", - "[E:20]\tTrain Loss: 0.614\n", - "[E:40]\tTrain Loss: 0.174\n", - "[E:60]\tTrain Loss: 0.050\n", - "[E:80]\tTrain Loss: 0.034\n", - "[E:100]\tTrain Loss: 0.029\n", - "[E:120]\tTrain Loss: 0.027\n", - "[E:140]\tTrain Loss: 0.026\n", + "[E:0]\tTrain Loss: 2.495\n", + "[E:20]\tTrain Loss: 0.364\n", + "[E:40]\tTrain Loss: 0.044\n", + "[E:60]\tTrain Loss: 0.027\n", + "[E:80]\tTrain Loss: 0.026\n", + "[E:100]\tTrain Loss: 0.025\n", + "[E:120]\tTrain Loss: 0.025\n", + "[E:140]\tTrain Loss: 0.025\n", "[E:160]\tTrain Loss: 0.025\n", "[E:180]\tTrain Loss: 0.025\n", "[E:199]\tTrain Loss: 0.025\n", @@ -170,7 +170,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -184,7 +184,7 @@ " \twith 846 stored elements in Compressed Sparse Row format>]" ] }, - "execution_count": 20, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -202,7 +202,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": { "scrolled": false }, @@ -220,7 +220,7 @@ "0.9" ] }, - "execution_count": 10, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -231,7 +231,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -268,7 +268,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -312,7 +312,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -321,74 +321,76 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 153.98it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 204.52it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 0 with best score 0.873\n", - "[E:0]\tTrain Loss: 2.277\tDev score: 0.873\n" + 
"Saving model at iteration 0 with best score 0.830\n", + "[E:0]\tTrain Loss: 2.259\tDev score: 0.830\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 174.00it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 189.84it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 1 with best score 0.913\n", - "[E:1]\tTrain Loss: 1.324\tDev score: 0.913\n" + "Saving model at iteration 1 with best score 0.923\n", + "[E:1]\tTrain Loss: 1.330\tDev score: 0.923\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 245.52it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 204.76it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:2]\tTrain Loss: 1.033\tDev score: 0.913\n" + "Saving model at iteration 2 with best score 0.937\n", + "[E:2]\tTrain Loss: 1.046\tDev score: 0.937\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 243.85it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 186.08it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:3]\tTrain Loss: 0.899\tDev score: 0.900\n" + "[E:3]\tTrain Loss: 0.905\tDev score: 0.920\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 229.92it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 188.75it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:4]\tTrain Loss: 0.854\tDev score: 0.897\n", - "Restoring best model from iteration 1 with score 0.913\n", - "Finished Training\n" + "[E:4]\tTrain Loss: 0.860\tDev score: 0.907\n", + "Restoring best model from iteration 2 with score 0.937\n", + "Finished Training\n", + "Accuracy: 0.937\n" ] } ], @@ -412,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 10, "metadata": { "scrolled": true }, @@ -422,10 +424,10 @@ "output_type": "stream", "text": [ "Label Model:\n", - "Accuracy: 0.877\n", + "Accuracy: 0.850\n", "\n", "End Model:\n", - "Accuracy: 0.907\n" + "Accuracy: 0.927\n" ] } ], @@ -448,16 +450,16 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy (t=0): 0.930\n", + "Accuracy (t=0): 0.950\n", "Accuracy (t=1): 0.910\n", - "Accuracy (t=2): 0.880\n" + "Accuracy (t=2): 0.920\n" ] } ], @@ -474,30 +476,30 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1,\n", - " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2,\n", + "[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2,\n", + " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", + " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 2,\n", " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2,\n", " 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", - " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 2, 1,\n", - " 3, 3, 3, 3, 2, 3, 1, 2, 3, 3, 2, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", - " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 1, 1, 3, 3, 3, 3,\n", + " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3,\n", + " 3, 3, 3, 3, 2, 3, 1, 2, 2, 3, 2, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 
1,\n", + " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 3, 1, 3, 3, 3, 3,\n", " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 3, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", " 3, 3, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", - " array([1, 3, 3, 1, 3, 3, 3, 3, 3, 2, 1, 1, 2, 1, 2, 2, 1, 1, 3, 2, 3, 3,\n", - " 2, 1, 2, 1, 3, 2, 3, 3, 3, 2, 3, 3, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3,\n", - " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 3, 3, 3, 1, 1, 1,\n", - " 3, 1, 3, 3, 2, 2, 3, 2, 3, 2, 2, 3, 3, 1, 3, 3, 3, 2, 2, 2, 2, 1,\n", - " 2, 2, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" + " array([1, 3, 3, 1, 3, 3, 3, 3, 3, 2, 1, 1, 2, 1, 2, 2, 1, 1, 3, 3, 3, 1,\n", + " 2, 1, 2, 1, 3, 2, 3, 3, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3,\n", + " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 1, 3, 1, 1, 1, 1,\n", + " 3, 1, 3, 3, 1, 2, 3, 2, 3, 2, 2, 1, 3, 1, 3, 3, 3, 1, 2, 1, 2, 1,\n", + " 2, 3, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" ] }, - "execution_count": 17, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -506,6 +508,13 @@ "Y_p = end_model.predict(Xs[2])\n", "Y_p" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 67fd6f6d196c6363b7cda5f4bc861ac2fa3be31c Mon Sep 17 00:00:00 2001 From: Jared Date: Thu, 11 Oct 2018 22:48:36 -0700 Subject: [PATCH 28/35] added gpu test --- .isort.cfg | 2 +- tests/gpu/test_gpu.py | 60 ++++++++++++++++++++++++++++++ tutorials/Multitask.ipynb | 78 +++++++++++++++++++-------------------- 3 files changed, 100 insertions(+), 40 deletions(-) create mode 100644 tests/gpu/test_gpu.py diff --git a/.isort.cfg b/.isort.cfg index ee7505c0..862bdfb8 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -4,4 +4,4 @@ include_trailing_comma=True force_grid_wrap=0 combine_as_imports=True line_length=80 -known_third_party=matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext,tqdm \ No newline at end of file +known_third_party=GPUtil,matplotlib,networkx,nltk,numpy,pandas,scipy,setuptools,sklearn,torch,torchtext,tqdm \ No newline at end of file diff --git a/tests/gpu/test_gpu.py b/tests/gpu/test_gpu.py new file mode 100644 index 00000000..da221b62 --- /dev/null +++ b/tests/gpu/test_gpu.py @@ -0,0 +1,60 @@ +import os +import pickle +import unittest + +import GPUtil + +from metal.end_model import EndModel +from metal.label_model import LabelModel +from metal.utils import split_data + +# Making sure we're using GPU 0 +os.environ["CUDA_VISIBLE_DEVICES"] = "0" + + +class GPUTest(unittest.TestCase): + @unittest.skipIf( + "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", + "Skipping this test on Travis CI.", + ) + def test_gpustorage(self): + # Running basics tutorial problem + with open("tutorials/data/basics_tutorial.pkl", "rb") as f: + X, Y, L, D = pickle.load(f) + + Xs, Ys, Ls, Ds = split_data( + X, Y, L, D, splits=[0.8, 0.1, 0.1], stratify_by=Y, seed=123 + ) + + label_model = LabelModel(k=2, seed=123) + label_model.train(Ls[0], Y_dev=Ys[1], n_epochs=500, print_every=25) + Y_train_ps = label_model.predict_proba(Ls[0]) + + # Creating a really large end model to use lots of memory + end_model = EndModel([1000, 100000, 2], seed=123, use_cuda=True) + + # Getting initial GPU storage use + initial_gpu_mem = GPUtil.getGPUs()[0].memoryUsed + + # Training model + end_model.train( + (Xs[0], Y_train_ps), + dev_data=(Xs[1], Ys[1]), + l2=0.1, + batch_size=256, + n_epochs=3, + print_every=1, + validation_metric="f1", + ) + + # Final GPU storage use + final_gpu_mem = GPUtil.getGPUs()[0].memoryUsed + + # On a Titan 
X, this model uses ~ 3 GB of memory + gpu_mem_difference = final_gpu_mem - initial_gpu_mem + + self.assertGreater(gpu_mem_difference, 1000) + + +if __name__ == "__main__": + unittest.main() diff --git a/tutorials/Multitask.ipynb b/tutorials/Multitask.ipynb index 7d69d37d..97d01679 100644 --- a/tutorials/Multitask.ipynb +++ b/tutorials/Multitask.ipynb @@ -58,12 +58,11 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import pickle\n", - "\n", "with open(\"data/multitask_tutorial.pkl\", 'rb') as f:\n", " Xs, Ys, Ls, Ds = pickle.load(f)" ] @@ -99,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +122,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -140,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -149,9 +148,9 @@ "text": [ "Computing O...\n", "Estimating \\mu...\n", - "[E:0]\tTrain Loss: 2.495\n", - "[E:20]\tTrain Loss: 0.364\n", - "[E:40]\tTrain Loss: 0.044\n", + "[E:0]\tTrain Loss: 2.785\n", + "[E:20]\tTrain Loss: 0.451\n", + "[E:40]\tTrain Loss: 0.053\n", "[E:60]\tTrain Loss: 0.027\n", "[E:80]\tTrain Loss: 0.026\n", "[E:100]\tTrain Loss: 0.025\n", @@ -170,7 +169,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -184,7 +183,7 @@ " \twith 846 stored elements in Compressed Sparse Row format>]" ] }, - "execution_count": 5, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -202,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": { "scrolled": false }, @@ -220,7 +219,7 @@ "0.9" ] }, - "execution_count": 6, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -231,7 +230,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -268,7 +267,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -306,13 +305,14 @@ ], "source": [ "from metal.multitask import MTEndModel\n", - "\n", + "import torch\n", + "use_cuda = torch.cuda.is_available()\n", "end_model = MTEndModel([1000,100,10], task_graph=task_graph, seed=123)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 12, "metadata": { "scrolled": true }, @@ -321,37 +321,37 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 204.52it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 161.90it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 0 with best score 0.830\n", - "[E:0]\tTrain Loss: 2.259\tDev score: 0.830\n" + "Saving model at iteration 0 with best score 0.833\n", + "[E:0]\tTrain Loss: 2.260\tDev score: 0.833\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 189.84it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 230.25it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Saving model at iteration 1 with best score 0.923\n", - "[E:1]\tTrain Loss: 1.330\tDev score: 0.923\n" + "Saving model at iteration 1 with best score 0.930\n", + "[E:1]\tTrain Loss: 1.334\tDev score: 0.930\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 
204.76it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 259.97it/s]\n" ] }, { @@ -359,35 +359,35 @@ "output_type": "stream", "text": [ "Saving model at iteration 2 with best score 0.937\n", - "[E:2]\tTrain Loss: 1.046\tDev score: 0.937\n" + "[E:2]\tTrain Loss: 1.054\tDev score: 0.937\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 186.08it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 256.49it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:3]\tTrain Loss: 0.905\tDev score: 0.920\n" + "[E:3]\tTrain Loss: 0.911\tDev score: 0.917\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 25/25 [00:00<00:00, 188.75it/s]\n" + "100%|██████████| 25/25 [00:00<00:00, 258.90it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "[E:4]\tTrain Loss: 0.860\tDev score: 0.907\n", + "[E:4]\tTrain Loss: 0.864\tDev score: 0.903\n", "Restoring best model from iteration 2 with score 0.937\n", "Finished Training\n", "Accuracy: 0.937\n" @@ -414,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 13, "metadata": { "scrolled": true }, @@ -450,16 +450,16 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Accuracy (t=0): 0.950\n", - "Accuracy (t=1): 0.910\n", - "Accuracy (t=2): 0.920\n" + "Accuracy (t=0): 0.930\n", + "Accuracy (t=1): 0.920\n", + "Accuracy (t=2): 0.930\n" ] } ], @@ -476,7 +476,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -484,22 +484,22 @@ "text/plain": [ "[array([2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2,\n", " 2, 2, 2, 2, 1, 2, 1, 1, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 2,\n", + " 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 2,\n", " 1, 2, 1, 1, 2, 2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 1, 1, 2, 2, 2, 2, 2,\n", - " 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", + " 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1]),\n", " array([3, 2, 1, 3, 2, 1, 2, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 3,\n", " 3, 3, 3, 3, 2, 3, 1, 2, 2, 3, 2, 3, 3, 2, 2, 3, 2, 3, 3, 1, 1, 1,\n", - " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 3, 1, 3, 3, 3, 3,\n", + " 2, 1, 1, 2, 1, 3, 3, 2, 2, 2, 3, 2, 3, 1, 3, 3, 3, 1, 2, 3, 3, 3,\n", " 1, 3, 2, 2, 3, 3, 1, 3, 2, 3, 3, 3, 1, 3, 1, 2, 1, 3, 3, 3, 3, 3,\n", " 3, 3, 2, 1, 1, 1, 1, 3, 3, 3, 2, 2]),\n", " array([1, 3, 3, 1, 3, 3, 3, 3, 3, 2, 1, 1, 2, 1, 2, 2, 1, 1, 3, 3, 3, 1,\n", " 2, 1, 2, 1, 3, 2, 3, 3, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3,\n", - " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 1, 3, 1, 1, 1, 1,\n", + " 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 2, 3, 2, 3, 1, 1, 1, 3, 3, 1, 1, 1,\n", " 3, 1, 3, 3, 1, 2, 3, 2, 3, 2, 2, 1, 3, 1, 3, 3, 3, 1, 2, 1, 2, 1,\n", " 2, 3, 3, 3, 3, 3, 3, 1, 1, 2, 3, 3])]" ] }, - "execution_count": 12, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } From 1437541302ef3efe7637351d09eb775967ead62f Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 07:54:52 -0700 Subject: [PATCH 29/35] Reverting to Y_p --- metal/classifier.py | 20 ++++++++++---------- metal/multitask/mt_classifier.py | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index a46c58b7..02bf6029 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ 
-359,20 +359,20 @@ def score( scores: A (float) score or a list of such scores if kwarg metric is a list """ - Y_pred, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) + Y_p, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) # Evaluate on the specified metrics metric_list = metric if isinstance(metric, list) else [metric] scores = [] for metric in metric_list: - score = metric_score(Y, Y_pred, metric, ignore_in_gold=[0]) + score = metric_score(Y, Y_p, metric, ignore_in_gold=[0]) scores.append(score) if verbose: print(f"{metric.capitalize()}: {score:.3f}") # Optionally print confusion matrix if print_confusion_matrix: - confusion_matrix(Y_pred, Y, pretty_print=True) + confusion_matrix(Y_p, Y, pretty_print=True) if isinstance(scores, list) and len(scores) == 1: return scores[0] @@ -390,11 +390,11 @@ def _get_predictions(self, data, break_ties="random", **kwargs): break_ties: How to break ties when making predictions Returns: - Y_pred: A Tensor of predictions + Y_p: A Tensor of predictions Y: A Tensor of labels """ data_loader = self._create_data_loader(data) - Y_pred = [] + Y_p = [] Y = [] # Do batch evaluation by default, getting the predictions and labels @@ -407,14 +407,14 @@ def _get_predictions(self, data, break_ties="random", **kwargs): Xb = Xb.cuda() # Append predictions and labels from DataLoader - Y_pred.append( + Y_p.append( self._to_numpy( self.predict(Xb, break_ties=break_ties, **kwargs) ) ) - Y_pred = np.hstack(Y_pred) + Y_p = np.hstack(Y_p) Y = np.hstack(Y) - return Y_pred, Y + return Y_p, Y def predict(self, X, break_ties="random", **kwargs): """Predicts hard (int) labels for an input X on all tasks @@ -426,8 +426,8 @@ def predict(self, X, break_ties="random", **kwargs): Returns: An n-dim np.ndarray of predictions in {1,...k} """ - Y_pred = self._to_numpy(self.predict_proba(X, **kwargs)) - return self._break_ties(Y_pred, break_ties).astype(np.int) + Y_p = self._to_numpy(self.predict_proba(X, **kwargs)) + return self._break_ties(Y_p, break_ties).astype(np.int) def predict_proba(self, X, **kwargs): """Predicts soft probabilistic labels for an input X on all tasks diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index 11cedb68..ab6dcf30 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -65,7 +65,7 @@ def score( scores: A (float) score or a t-length list of such scores if reduce=None """ - Y_pred, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) + Y_p, Y = self._get_predictions(data, break_ties=break_ties, **kwargs) # TODO: Handle multiple metrics... 
metric_list = metric if isinstance(metric, list) else [metric] @@ -74,7 +74,7 @@ def score( metric = metric_list[0] task_scores = [] - for t, Y_tp in enumerate(Y_pred): + for t, Y_tp in enumerate(Y_p): score = metric_score(Y[t], Y_tp, metric, ignore_in_gold=[0]) task_scores.append(score) From 20fc090c37abb07b3ab8a9b02bee9cd4353b7d9f Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 08:29:52 -0700 Subject: [PATCH 30/35] Cleaned up loss handling of CUDA --- metal/end_model/end_model.py | 9 +++------ metal/end_model/loss.py | 14 ++++---------- metal/multitask/mt_end_model.py | 9 +++------ 3 files changed, 10 insertions(+), 22 deletions(-) diff --git a/metal/end_model/end_model.py b/metal/end_model/end_model.py index 946d117a..c082e889 100644 --- a/metal/end_model/end_model.py +++ b/metal/end_model/end_model.py @@ -70,9 +70,7 @@ def _build(self, input_module, middle_modules, head_module): self.network = nn.Sequential(input_layer, *middle_layers, head) # Construct loss module - self.criteria = SoftCrossEntropyLoss( - reduction="sum", use_cuda=self.config["use_cuda"] - ) + self.criteria = SoftCrossEntropyLoss(reduction="sum") def _build_input_layer(self, input_module): if input_module is None: @@ -162,9 +160,8 @@ def _create_dataset(self, *data): return MetalDataset(*data) def _get_loss_fn(self): - if hasattr(self.config, "use_cuda"): - if self.config["use_cuda"]: - criteria = self.criteria.cuda() + if self.config["use_cuda"]: + criteria = self.criteria.cuda() else: criteria = self.criteria loss_fn = lambda X, Y: criteria(self.forward(X), Y) diff --git a/metal/end_model/loss.py b/metal/end_model/loss.py index d9612bb4..11be27d6 100644 --- a/metal/end_model/loss.py +++ b/metal/end_model/loss.py @@ -18,24 +18,18 @@ class SoftCrossEntropyLoss(nn.Module): target: An [n, k] float tensor of target probabilities """ - def __init__( - self, weight=None, reduction="elementwise_mean", use_cuda=False - ): + def __init__(self, weight=None, reduction="elementwise_mean"): super().__init__() assert weight is None or isinstance(weight, torch.FloatTensor) self.weight = weight self.reduction = reduction - self.use_cuda = use_cuda def forward(self, input, target): n, k = input.shape - cum_losses = torch.zeros(n) - if self.use_cuda: - cum_losses = cum_losses.cuda() + # Note that t.new_zeros, t.new_full put tensor on same device as t + cum_losses = input.new_zeros(n) for y in range(k): - cls_idx = torch.full((n,), y, dtype=torch.long) - if self.use_cuda: - cls_idx = cls_idx.cuda() + cls_idx = input.new_full((n,), y, dtype=torch.long) y_loss = F.cross_entropy(input, cls_idx, reduction="none") if self.weight is not None: y_loss = y_loss * self.weight[y] diff --git a/metal/multitask/mt_end_model.py b/metal/multitask/mt_end_model.py index b633cd3a..0e00a1b1 100644 --- a/metal/multitask/mt_end_model.py +++ b/metal/multitask/mt_end_model.py @@ -86,9 +86,7 @@ def _build(self, input_modules, middle_modules, head_modules): self.heads = self._build_task_heads(head_modules) # Construct loss module - self.criteria = SoftCrossEntropyLoss( - reduction="sum", use_cuda=self.config["use_cuda"] - ) + self.criteria = SoftCrossEntropyLoss(reduction="sum") def _build_input_layer(self, input_modules): if input_modules is None: @@ -281,9 +279,8 @@ def _preprocess_Y(self, Y, k=None): def _get_loss_fn(self): """Returns the loss function to use in the train routine""" - if hasattr(self.config, "use_cuda"): - if self.config["use_cuda"]: - criteria = self.criteria.cuda() + if self.config["use_cuda"]: + criteria = 
self.criteria.cuda() else: criteria = self.criteria loss_fn = lambda X, Y: sum( From f261b33bf4066bbedf6dbcb8b7d24bb115e48084 Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 08:30:30 -0700 Subject: [PATCH 31/35] Docstring fix --- metal/multitask/mt_classifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metal/multitask/mt_classifier.py b/metal/multitask/mt_classifier.py index ab6dcf30..25a0f7d2 100644 --- a/metal/multitask/mt_classifier.py +++ b/metal/multitask/mt_classifier.py @@ -52,7 +52,7 @@ def score( ): """Scores the predictive performance of the Classifier on all tasks Args: - data: either a Pytorch DataLoader or tuple supplying (X,Y): + data: either a Pytorch Dataset, DataLoader or tuple supplying (X,Y): X: The input for the predict method Y: A t-length list of [n] or [n, 1] np.ndarrays or torch.Tensors of gold labels in {1,...,K_t} From 419d844c012de6f144d689d9ade42954d3eadb49 Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 08:43:52 -0700 Subject: [PATCH 32/35] Cleaning up place_on_gpu function (pending GPU testing) --- metal/classifier.py | 6 +++--- metal/utils.py | 17 ++++++++++------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/metal/classifier.py b/metal/classifier.py index 02bf6029..ceea47d0 100644 --- a/metal/classifier.py +++ b/metal/classifier.py @@ -11,7 +11,7 @@ from metal.analysis import confusion_matrix from metal.metrics import metric_score -from metal.utils import Checkpointer, mt_to_cuda, recursive_merge_dicts +from metal.utils import Checkpointer, place_on_gpu, recursive_merge_dicts class Classifier(nn.Module): @@ -179,7 +179,7 @@ def _train(self, train_data, loss_fn, dev_data=None): # Moving data to GPU if self.config["use_cuda"]: - data = mt_to_cuda(data) + data = place_on_gpu(data) # Zero the parameter gradients optimizer.zero_grad() @@ -404,7 +404,7 @@ def _get_predictions(self, data, break_ties="random", **kwargs): # Optionally move to GPU if self.config["use_cuda"]: - Xb = Xb.cuda() + Xb = place_on_gpu(Xb) # Append predictions and labels from DataLoader Y_p.append( diff --git a/metal/utils.py b/metal/utils.py index 6b3491ed..f2b99927 100644 --- a/metal/utils.py +++ b/metal/utils.py @@ -367,11 +367,14 @@ def slice_data(data, indices): return outputs -def mt_to_cuda(data): - """Utility to push data from multitask data loaders to GPU""" - data[0] = data[0].cuda() - if isinstance(data[1], list): - data[1] = [d.cuda() for d in data[1]] +def place_on_gpu(data): + """Utility to place data on GPU, where data could be a torch.Tensor, a tuple + or list of Tensors, or a tuple or list of tuple or lists of Tensors""" + if isinstance(data, (list, tuple)): + for i in len(data): + data[i] = place_on_gpu(data[i]) + return data + elif isinstance(data, torch.Tensor): + return data.cuda() else: - data[1] = data[1].cuda() - return data + return ValueError(f"Data type {type(data)} not recognized.") From 607d3bd28f3d03823e8a4e09f9c25b561fd100eb Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 08:54:45 -0700 Subject: [PATCH 33/35] Install requirements + README note for GPU tests --- README.md | 6 ++++++ tests/gpu/requirements.txt | 1 + 2 files changed, 7 insertions(+) create mode 100644 tests/gpu/requirements.txt diff --git a/README.md b/README.md index 885c9725..09be8775 100644 --- a/README.md +++ b/README.md @@ -105,3 +105,9 @@ This will install a few additional tools that help to ensure that any commits or * [flake8](http://flake8.pycqa.org/en/latest/): PEP8 linting After running `make dev` to 
install the necessary tools, you can run `make check` to see if any changes you've made violate the repo standards and `make fix` to fix any related to isort/black. Fixes for flake8 violations will need to be made manually. + +### GPU Usage +MeTaL supports GPU usage, but does not include this in automatically-run tests; to run these tests, first install the requirements in `tests/gpu/requirements.txt`, then run: +``` +nosetests tests/gpu +``` \ No newline at end of file diff --git a/tests/gpu/requirements.txt b/tests/gpu/requirements.txt new file mode 100644 index 00000000..84af6d2f --- /dev/null +++ b/tests/gpu/requirements.txt @@ -0,0 +1 @@ +GPUtil \ No newline at end of file From 5646a635fd397cdef03d23df2cec608c24ea130e Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 09:30:49 -0700 Subject: [PATCH 34/35] Minor addition of readme to tests/gpu --- tests/gpu/README.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 tests/gpu/README.md diff --git a/tests/gpu/README.md b/tests/gpu/README.md new file mode 100644 index 00000000..43bfd1f5 --- /dev/null +++ b/tests/gpu/README.md @@ -0,0 +1,7 @@ +### GPU Tests + +**Note that this is not a package** (no `__init__.py` file), so that `nosetests` skips it. +To run these tests, install the `requirements.txt` and then run (from base directory): +``` +nosetests tests/gpu +``` \ No newline at end of file From 754a63f17f8e4fab53cfbab8867f695f09e6af40 Mon Sep 17 00:00:00 2001 From: ajratner Date: Fri, 12 Oct 2018 12:37:06 -0700 Subject: [PATCH 35/35] Typo fix --- metal/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metal/utils.py b/metal/utils.py index f2b99927..6e146f7a 100644 --- a/metal/utils.py +++ b/metal/utils.py @@ -371,7 +371,7 @@ def place_on_gpu(data): """Utility to place data on GPU, where data could be a torch.Tensor, a tuple or list of Tensors, or a tuple or list of tuple or lists of Tensors""" if isinstance(data, (list, tuple)): - for i in len(data): + for i in range(len(data)): data[i] = place_on_gpu(data[i]) return data elif isinstance(data, torch.Tensor):
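
The `place_on_gpu` helper patched above recurses through nested lists of tensors, which is what lets a multitask batch of the form `[X, [Y_1, ..., Y_t]]` be moved to the GPU in a single call from the training loop. A minimal usage sketch, not part of the patch series itself: it assumes a CUDA device is visible (as in the GPU test above), and the batch shapes are made up for illustration.

```python
import torch

from metal.utils import place_on_gpu

# Illustrative multitask-style batch: an input tensor plus a list of
# per-task label tensors, mirroring what the MeTaL data loaders yield.
if torch.cuda.is_available():
    batch = [torch.randn(4, 10), [torch.ones(4, 2), torch.ones(4, 3)]]
    batch = place_on_gpu(batch)
    print(batch[0].device)     # cuda:0 (expected)
    print(batch[1][0].device)  # cuda:0 -- nested tensors are moved as well
```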
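
Patch 30 drops the explicit `use_cuda` flag from `SoftCrossEntropyLoss` by allocating its working tensors with `Tensor.new_zeros` / `Tensor.new_full`, which inherit the device of the logits. A small sketch of that behavior with made-up shapes; it runs on CPU as-is and only moves to the GPU when one is available.

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 3)
if torch.cuda.is_available():
    logits = logits.cuda()

# new_zeros / new_full allocate on the same device as `logits`, so this
# loop -- the same pattern the patched loss uses -- is device-agnostic.
cum_losses = logits.new_zeros(logits.shape[0])
for y in range(logits.shape[1]):
    cls_idx = logits.new_full((logits.shape[0],), y, dtype=torch.long)
    cum_losses += F.cross_entropy(logits, cls_idx, reduction="none")

print(cum_losses.device)  # matches logits.device
```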